Example #1
def verka(encoder: nn.Module):
    # CelebA test evaluation: average Samples_Loss (p=1) between the landmark
    # measure decoded from the encoder's heatmaps and the ground-truth landmarks.
    res = []
    for i, (image, lm) in enumerate(LazyLoader.celeba_test(64)):
        content = encoder(image.cuda())
        mes = UniformMeasure2D01(lm.cuda())
        pred_measures: UniformMeasure2D01 = UniformMeasure2DFactory.from_heatmap(content)
        res.append(Samples_Loss(p=1)(mes, pred_measures).item() * image.shape[0])
    return np.sum(res) / len(LazyLoader.celeba_test(1).dataset)  # sum of per-batch totals / dataset size = per-sample mean
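The evaluators in these examples rely on UniformMeasure2DFactory.from_heatmap to turn the encoder's heatmaps into 2D keypoint coordinates. Its implementation is not shown here; a common way to realize that step is a per-channel soft-argmax, sketched below as an assumption (coordinates normalized to [0, 1], matching the UniformMeasure2D01 convention used throughout).

import torch

def soft_argmax_2d(heatmaps: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for UniformMeasure2DFactory.from_heatmap:
    # converts (B, K, H, W) heatmaps to (B, K, 2) coordinates in [0, 1].
    b, k, h, w = heatmaps.shape
    probs = heatmaps.clamp(min=0).reshape(b, k, -1)
    probs = probs / (probs.sum(dim=-1, keepdim=True) + 1e-8)          # normalize each channel
    ys = torch.linspace(0, 1, h, device=heatmaps.device)
    xs = torch.linspace(0, 1, w, device=heatmaps.device)
    grid_y, grid_x = torch.meshgrid(ys, xs, indexing="ij")
    grid = torch.stack([grid_x.reshape(-1), grid_y.reshape(-1)], -1)  # (H*W, 2) in (x, y) order
    return probs @ grid                                               # expected coordinate per channel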
Example #2
def nadbka(encoder: nn.Module):
    # 300-W NME: mean per-landmark L2 error normalized by the distance between
    # landmarks 36 and 45 (the outer eye corners).
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        content = heatmap_to_measure(encoder(data))[0]
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += ((content - landmarks).pow(2).sum(dim=2).sqrt().mean(dim=1) / eye_dist).sum().item()
    return sum_loss / len(LazyLoader.w300().test_dataset)
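The 300-W evaluators in these examples repeat the same metric: the mean per-landmark L2 error divided by the inter-ocular distance (landmarks 36 and 45). A minimal standalone version of that normalized mean error, assuming (B, K, 2) prediction and target tensors in the same normalized coordinates:

import torch

def normalized_mean_error(pred: torch.Tensor, target: torch.Tensor,
                          left_idx: int = 36, right_idx: int = 45) -> torch.Tensor:
    # Per-sample NME: mean L2 landmark error / inter-ocular distance.
    eye_dist = (target[:, right_idx] - target[:, left_idx]).pow(2).sum(dim=1).sqrt()
    per_landmark = (pred - target).pow(2).sum(dim=2).sqrt()  # (B, K) distances
    return per_landmark.mean(dim=1) / eye_dist               # (B,) per-sample errors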
Example #3
def verka_cardio_w2(enc):
    sum_loss = 0
    n = len(LazyLoader.cardio().test_dataset)
    for i, batch in enumerate(LazyLoader.cardio().test_loader):
        data = batch['image'].cuda()
        landmarks_ref = batch["keypoints"].cuda()
        pred = enc(data)["mes"].coord
        sum_loss += OTWasDist().forward(pred, landmarks_ref).sum().item()
    print("test brule_loss: ", sum_loss / n)
    return sum_loss / n
Example #4
def test():
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        content = encoder_HG(data)["coords"]
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (
            (content - landmarks).pow(2).sum(dim=2).sqrt().mean(dim=1) /
            eye_dist).sum().item()
    return sum_loss / len(LazyLoader.w300().test_dataset)
Example #5
def liuboff(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        landmarks[landmarks > 1] = 0.99999
        # content = heatmap_to_measure(encoder(data))[0]
        pred_measure = UniformMeasure2D01(encoder(data)["coords"])
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (handmadew1(pred_measure, target) / eye_dist).sum().item()
    return sum_loss / len(LazyLoader.w300().test_dataset)
Example #6
def verka_human(enc):
    sum_loss = 0
    n = len(LazyLoader.human36().test_dataset)
    for i, batch in enumerate(LazyLoader.human36().test_loader):
        data = batch['A'].cuda()
        landmarks = batch["paired_B"].cuda()
        pred = enc(data)["mes"].coord
        # eye_dist = landmarks[:, 45] - landmarks[:, 36]
        # eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += ((pred - landmarks).pow(2).sum(dim=2).sqrt().mean(
            dim=1)).sum().item()
    print("test brule_loss: ", sum_loss / n)
    return sum_loss / n
Example #7
def verka_300w_w2(enc):
    sum_loss = 0
    n = len(LazyLoader.w300().test_dataset)
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].cuda()
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        pred = enc(data)["mes"].coord
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (OTWasDist().forward(pred, landmarks) /
                     eye_dist).sum().item()
    print("test brule_loss: ", sum_loss / n)
    return sum_loss / n
def liuboff(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.mafl().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda().type(dtype=torch.float32)
        landmarks[landmarks > 1] = 0.99999
        # content = heatmap_to_measure(encoder(data))[0]
        pred_measure = UniformMeasure2DFactory.from_heatmap(encoder(data))
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        eye_dist = landmarks[:, 1] - landmarks[:, 0]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        # w1_loss = (handmadew1(pred_measure, target) / eye_dist).sum().item()
        # l1_loss = ((pred_measure.coord - target.coord).pow(2).sum(dim=2).sqrt().mean(dim=1) / eye_dist).sum().item()
        # print(w1_loss, l1_loss)
        sum_loss += ((pred_measure.coord - target.coord).pow(2).sum(dim=2).sqrt().mean(dim=1) / eye_dist).sum().item()
    return sum_loss / len(LazyLoader.mafl().test_dataset)
def test(enc):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].to(device)
        mes = ProbabilityMeasureFabric(256).from_coord_tensor(
            batch["meta"]["keypts_normalized"]).cuda()
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        content = enc(data)
        content_xy, _ = heatmap_to_measure(content)
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (
            (content_xy - mes.coord).pow(2).sum(dim=2).sqrt().mean(dim=1) /
            eye_dist).sum().item()
    print("test loss: ", sum_loss / len(LazyLoader.w300().test_dataset))
    return sum_loss / len(LazyLoader.w300().test_dataset)
def liuboffMAFL(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.mafl().test_loader):
        data = batch['data'].cuda()
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        landmarks[landmarks > 1] = 0.99999

        pred_measure = UniformMeasure2D01(encoder(data)["coords"])
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))

        eye_dist = landmarks[:, 1] - landmarks[:, 0]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()

        sum_loss += (handmadew1(pred_measure, target, 0.005) /
                     eye_dist).sum().item()

    return sum_loss / len(LazyLoader.mafl().test_dataset)
Example #11
def verka_300w_w2_boot(enc):

    sum_loss = 0
    n = len(LazyLoader.w300().test_dataset)
    loader = torch_data.DataLoader(LazyLoader.w300().test_dataset,
                                   batch_size=16,
                                   drop_last=False,
                                   sampler=torch_data.RandomSampler(
                                       LazyLoader.w300().test_dataset,
                                       replacement=True,
                                       num_samples=n),
                                   num_workers=20)

    for i, batch in enumerate(loader):
        data = batch['data'].cuda()
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        pred = enc(data)["mes"].coord
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (OTWasDist().forward(pred, landmarks) /
                     eye_dist).sum().item()
    # print("test brule_loss: ", sum_loss / n)
    return sum_loss / n
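verka_300w_w2_boot scores the model on a single bootstrap resample of the 300-W test set (RandomSampler with replacement). Repeating the call and taking empirical percentiles gives a rough confidence interval for the metric; a small sketch of that use, assuming the function above:

import numpy as np

def bootstrap_interval(enc, n_runs: int = 20, alpha: float = 0.05):
    # Re-run the resampled evaluation n_runs times and report the mean and percentile bounds.
    scores = np.array([verka_300w_w2_boot(enc) for _ in range(n_runs)])
    low, high = np.percentile(scores, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    return scores.mean(), (low, high)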
def train(generator, decoder, discriminator, encoder_HG, style_encoder, device,
          starting_model_number):
    latent_size = 512
    batch_size = 8
    sample_z = torch.randn(8, latent_size, device=device)
    Celeba.batch_size = batch_size
    W300DatasetLoader.batch_size = batch_size
    W300DatasetLoader.test_batch_size = 16

    test_img = next(LazyLoader.w300().loader_train_inf)["data"][:8].cuda()

    model = CondStyleGanModel(generator, StyleGANLoss(discriminator),
                              (0.001 / 4, 0.0015 / 4))

    style_opt = optim.Adam(style_encoder.parameters(),
                           lr=5e-4,
                           betas=(0.9, 0.99))
    cont_opt = optim.Adam(encoder_HG.parameters(), lr=3e-5, betas=(0.5, 0.97))

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(
            albumentations.Compose([
                albumentations.ElasticTransform(p=0.7,
                                                alpha=150,
                                                alpha_affine=1,
                                                sigma=10),
                albumentations.ShiftScaleRotate(p=0.9, rotate_limit=15),
            ])),
        ToTensor(device),
    ])

    g_transforms_without_norm: albumentations.DualTransform = albumentations.Compose(
        [
            ToNumpy(),
            NumpyBatch(
                albumentations.Compose([
                    albumentations.ElasticTransform(p=0.3,
                                                    alpha=150,
                                                    alpha_affine=1,
                                                    sigma=10),
                    albumentations.ShiftScaleRotate(p=0.7, rotate_limit=15),
                ])),
            ToTensor(device),
        ])

    R_t = DualTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img: coord_hm_loss(
            encoder_HG(trans_dict['image'])["coords"], trans_dict['mask']))

    R_s = UnoTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img, ltnt: WR.L1("R_s")
        (ltnt, style_encoder(trans_dict['image'])))

    barycenter: UniformMeasure2D01 = UniformMeasure2DFactory.load(
        f"{Paths.default.models()}/face_barycenter_68").cuda().batch_repeat(
            batch_size)

    R_b = BarycenterRegularizer.__call__(barycenter, 1.0, 2.0, 4.0)

    tuner = GoldTuner([0.37, 2.78, 0.58, 1.43, 3.23],
                      device=device,
                      rule_eps=0.001,
                      radius=0.3,
                      active=False)

    trainer_gan = gan_trainer(model, generator, decoder, encoder_HG,
                              style_encoder, R_s, style_opt, g_transforms)
    content_trainer = content_trainer_with_gan(cont_opt, tuner, encoder_HG,
                                               R_b, R_t, model, generator,
                                               g_transforms, decoder,
                                               style_encoder)
    supervise_trainer = content_trainer_supervised(
        cont_opt, encoder_HG,
        LazyLoader.w300().loader_train_inf)

    for i in range(11000):
        WR.counter.update(i)

        requires_grad(encoder_HG, False)
        real_img = next(LazyLoader.celeba().loader).to(device)

        encoded = encoder_HG(real_img)
        internal_content = encoded["skeleton"].detach()

        trainer_gan(i, real_img, internal_content)
        # content_trainer(real_img)
        train_content(cont_opt, R_b, R_t, real_img, model, encoder_HG, decoder,
                      generator, style_encoder)
        supervise_trainer()

        encoder_ema.accumulate(encoder_HG.module, i, 0.98)
        if i % 50 == 0 and i > 0:
            encoder_ema.write_to(encoder_HG.module)

        if i % 100 == 0:
            coefs = json.load(open("../parameters/content_loss.json"))
            print(i, coefs)
            with torch.no_grad():

                # pred_measures_test, sparse_hm_test = encoder_HG(test_img)
                encoded_test = encoder_HG(test_img)
                pred_measures_test: UniformMeasure2D01 = UniformMeasure2D01(
                    encoded_test["coords"])
                heatmaper_256 = ToGaussHeatMap(256, 1.0)
                sparse_hm_test_1 = heatmaper_256.forward(
                    pred_measures_test.coord)

                latent_test = style_encoder(test_img)

                sparce_mask = sparse_hm_test_1.sum(dim=1, keepdim=True)
                sparce_mask[sparce_mask < 0.0003] = 0
                iwm = imgs_with_mask(test_img, sparce_mask)
                send_images_to_tensorboard(WR.writer, iwm, "REAL", i)

                fake_img, _ = generator(encoded_test["skeleton"], [sample_z])
                iwm = imgs_with_mask(fake_img, pred_measures_test.toImage(256))
                send_images_to_tensorboard(WR.writer, iwm, "FAKE", i)

                restored = decoder(encoded_test["skeleton"], latent_test)
                iwm = imgs_with_mask(restored, pred_measures_test.toImage(256))
                send_images_to_tensorboard(WR.writer, iwm, "RESTORED", i)

                content_test_256 = (encoded_test["skeleton"]).repeat(1, 3, 1, 1) * \
                    torch.tensor([1.0, 1.0, 0.0], device=device).view(1, 3, 1, 1)

                content_test_256 = (content_test_256 - content_test_256.min()
                                    ) / content_test_256.max()
                send_images_to_tensorboard(WR.writer,
                                           content_test_256,
                                           "HM",
                                           i,
                                           normalize=False,
                                           range=(0, 1))

        if i % 50 == 0 and i >= 0:
            test_loss = liuboff(encoder_HG)
            print("liuboff", test_loss)
            # test_loss = nadbka(encoder_HG)
            tuner.update(test_loss)
            WR.writer.add_scalar("liuboff", test_loss, i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.module.state_dict(),
                    'd': discriminator.module.state_dict(),
                    'c': encoder_HG.module.state_dict(),
                    "s": style_encoder.state_dict(),
                    "e": encoder_ema.storage_model.state_dict()
                },
                f'{Paths.default.models()}/stylegan2_new_{str(i + starting_model_number).zfill(6)}.pt',
            )
Example #13
hm_discriminator = Discriminator(image_size, input_nc=1, channel_multiplier=1)
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001/4, 0.0015/4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg, StyleGANLoss(hm_discriminator, r1=3), (2e-5, 0.0015/4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

print(f"board path: {Paths.default.board()}/cardio{int(time.time())}")
writer = SummaryWriter(f"{Paths.default.board()}/cardio{int(time.time())}")
WR.writer = writer

#%%

test_batch = next(LazyLoader.cardio().loader_train_inf)
test_img = test_batch["image"].cuda()
test_landmarks = next(LazyLoader.cardio_landmarks(args.data_path).loader_train_inf).cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=100000, was_coef=2000)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

for i in range(100000):

    WR.counter.update(i)
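image_accumulator and hm_accumulator above keep smoothed copies of the generator and hourglass weights. The Accumulator class itself is not part of this snippet; the sketch below shows one plausible implementation of such a weight EMA with the same decay/write_every interface (an assumption, not the project's actual class).

import copy
import torch

class WeightEMA:
    # Hypothetical stand-in for Accumulator: exponential moving average of a model's weights,
    # written back into the live model every `write_every` steps.
    def __init__(self, model: torch.nn.Module, decay: float = 0.99, write_every: int = 100):
        self.model = model
        self.decay = decay
        self.write_every = write_every
        self.shadow = copy.deepcopy(model).eval()
        for p in self.shadow.parameters():
            p.requires_grad_(False)

    def step(self, i: int):
        with torch.no_grad():
            for p_ema, p in zip(self.shadow.parameters(), self.model.parameters()):
                p_ema.mul_(self.decay).add_(p, alpha=1 - self.decay)
        if i % self.write_every == 0 and i > 0:
            self.model.load_state_dict(self.shadow.state_dict())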
Example #14
File: hm2img.py  Project: nazarblch/brule2
hg = HG_heatmap(heatmapper, num_blocks=1)
hg.load_state_dict(weights['gh'])
hg = hg.cuda()
hm_discriminator = Discriminator(image_size, input_nc=1, channel_multiplier=1)
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001/4, 0.0015/4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg, StyleGANLoss(hm_discriminator), (2e-5, 0.0015/4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(f"{Paths.default.board()}/hm2img{int(time.time())}")
WR.writer = writer

batch = next(LazyLoader.w300().loader_train_inf)
test_img = batch["data"].cuda()
test_landmarks = torch.clamp(next(LazyLoader.w300_landmarks(args.data_path).loader_train_inf).cuda(), max=1)
test_hm = heatmapper.forward(test_landmarks).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss().cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=10000/2, was_coef=100)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

#
# fake, _ = enc_dec.generate(test_hm, test_noise)
# plt_img = torch.cat([test_img[:3], fake[:3]]).detach().cpu()
# plt_lm = torch.cat([hg.forward(test_img)["mes"].coord[:3], test_landmarks[:3]]).detach().cpu()
Example #15
hg = HG_heatmap(heatmapper)
hg.load_state_dict(weights['gh'])
hg = hg.cuda()
hm_discriminator = Discriminator(image_size, input_nc=1, channel_multiplier=1)
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001/4, 0.0015/4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg, StyleGANLoss(hm_discriminator), (2e-5, 0.0015/4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(f"{Paths.default.board()}/hm2img{int(time.time())}")
WR.writer = writer

test_img = next(LazyLoader.w300().loader_train_inf)["data"].cuda()
test_landmarks = torch.clamp(next(LazyLoader.w300augment_landmarks(args.data_path).loader_train_inf).cuda(), max=1)
test_hm = heatmapper.forward(test_landmarks).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss().cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=1000000, was_coef=2000)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)


for i in range(100000):

    WR.counter.update(i)
        ll = (bc_net(ws).reshape(-1, padding, 2) - bs).pow(2).sum() / 32
        print(ll.item())
        lll = ll.item()
        ll.backward()
        bc_net_opt.step()

    # replay_buf = replay_buf[replay_buf.__len__() - 32:]

    barycenter.coord = bc_net(weights[None, :]).reshape(1, padding, 2).detach()
    return barycenter, lll


# mafl_dataloader = LazyLoader.w300().loader_train_inf
# mes = UniformMeasure2D01(next(mafl_dataloader)['meta']['keypts_normalized'].type(torch.float32)).cuda()
mes = UniformMeasure2D01(next(iter(
    LazyLoader.celeba_test(batch_size)))[1]).cuda()

for j in range(10000):
    weights = Dirichlet(torch.ones(batch_size) / 10).sample().cuda()
    barycenter, lll = compute_wbc(mes, weights, min(200, j + 10))

    if j % 50 == 0:
        print(j)
        sced.step(lll)

    if j % 50 == 0:
        plt.imshow(barycenter.toImage(200)[0][0].detach().cpu().numpy())
        plt.show()

    starting_model_number = 0
    if j % 1000 == 0 and j > 0:
def hm_svoego_roda_loss(pred, target):
    # BCE between heatmaps, plus MSE between the coordinate measures recovered
    # from them (heatmap_to_measure), plus an L1 heatmap term.

    pred_xy, _ = heatmap_to_measure(pred)
    t_xy, _ = heatmap_to_measure(target)

    return Loss(nn.BCELoss()(pred, target) +
                nn.MSELoss()(pred_xy, t_xy) * 0.0005 +
                (pred - target).abs().mean() * 0.3)


R_t = DualTransformRegularizer.__call__(
    g_transforms, lambda trans_dict, img: hm_svoego_roda_loss(
        encoder_HG(trans_dict['image']), trans_dict['mask']))

for epoch in range(130):
    for i, batch in enumerate(LazyLoader.w300().loader_train):
        # print(i)
        counter.update(i + epoch * len(LazyLoader.w300().loader_train))

        data = batch['data'].to(device)
        mes = ProbabilityMeasureFabric(256).from_coord_tensor(
            batch["meta"]["keypts_normalized"]).cuda()
        target_hm = heatmaper.forward(mes.probability, mes.coord * 63)

        content = encoder_HG(data)
        hm_svoego_roda_loss(content, target_hm).minimize_step(cont_opt)

        if i % 5 == 0:
            real_img = next(LazyLoader.celeba().loader).to(device)
            content = encoder_HG(data)
            coefs = json.load(open("../parameters/content_loss_sup.json"))
def train(generator, decoder, discriminator, encoder_HG, style_encoder, device, starting_model_number):
    latent_size = 512
    batch_size = 12
    sample_z = torch.randn(8, latent_size, device=device)
    MAFL.batch_size = batch_size
    MAFL.test_batch_size = 64
    Celeba.batch_size = batch_size

    test_img = next(LazyLoader.mafl().loader_train_inf)["data"][:8].cuda()

    loss_st: StyleGANLoss = StyleGANLoss(discriminator)
    model = CondStyleGanModel(generator, loss_st, (0.001, 0.0015))

    style_opt = optim.Adam(style_encoder.parameters(), lr=5e-4, betas=(0.9, 0.99))
    cont_opt = optim.Adam(encoder_HG.parameters(), lr=2e-5, betas=(0.5, 0.97))

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(albumentations.Compose([
            ResizeMask(h=256, w=256),
            albumentations.ElasticTransform(p=0.7, alpha=150, alpha_affine=1, sigma=10),
            albumentations.ShiftScaleRotate(p=0.7, rotate_limit=15),
            ResizeMask(h=64, w=64),
            NormalizeMask(dim=(0, 1, 2))
        ])),
        ToTensor(device),
    ])

    R_t = DualTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img:
        # rt_loss(encoder_HG(trans_dict['image']), trans_dict['mask'])
        stariy_hm_loss(encoder_HG(trans_dict['image']), trans_dict['mask'])
    )

    R_s = UnoTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img, ltnt:
        L1("R_s")(ltnt, style_encoder(trans_dict['image']))
    )

    barycenter: UniformMeasure2D01 = UniformMeasure2DFactory.load(
        f"{Paths.default.models()}/face_barycenter_5").cuda().batch_repeat(batch_size)

    R_b = BarycenterRegularizer.__call__(barycenter, 1.0, 2.0, 4.0)
    tuner = GoldTuner([0.37, 1.55, 0.9393, 0.1264, 1.7687, 0.8648, 1.8609], device=device, rule_eps=0.01 / 2,
                      radius=0.1, active=True)

    heatmaper = ToGaussHeatMap(64, 1.0)
    sparse_bc = heatmaper.forward(barycenter.coord * 63)
    sparse_bc = nn.Upsample(scale_factor=4)(sparse_bc).sum(dim=1, keepdim=True).repeat(1, 3, 1, 1) * \
                torch.tensor([1.0, 1.0, 0.0], device=device).view(1, 3, 1, 1)
    sparse_bc = (sparse_bc - sparse_bc.min()) / sparse_bc.max()
    send_images_to_tensorboard(writer, sparse_bc, "BC", 0, normalize=False, range=(0, 1))

    trainer_gan = gan_trainer(model, generator, decoder, encoder_HG, style_encoder, R_s, style_opt, heatmaper,
                              g_transforms)
    content_trainer = content_trainer_with_gan(cont_opt, tuner, heatmaper, encoder_HG, R_b, R_t, model, generator,
                                               g_transforms)
    supervise_trainer = content_trainer_supervised(cont_opt, encoder_HG, LazyLoader.mafl().loader_train_inf)

    for i in range(100000):
        counter.update(i)

        requires_grad(encoder_HG, False)  # REMOVE BEFORE TRAINING
        real_img = next(LazyLoader.mafl().loader_train_inf)["data"].to(device) \
            if i % 5 == 0 else next(LazyLoader.celeba().loader).to(device)

        img_content = encoder_HG(real_img)
        pred_measures: UniformMeasure2D01 = UniformMeasure2DFactory.from_heatmap(img_content)
        sparse_hm = heatmaper.forward(pred_measures.coord * 63).detach()
        trainer_gan(i, real_img, pred_measures.detach(), sparse_hm.detach(), apply_g=False)
        supervise_trainer()

        if i % 4 == 0:
            # real_img = next(LazyLoader.mafl().loader_train_inf)["data"].to(device)
            trainer_gan(i, real_img, pred_measures.detach(), sparse_hm.detach(), apply_g=True)
            content_trainer(real_img)

        if i % 100 == 0:
            coefs = json.load(open("../parameters/content_loss.json"))
            print(i, coefs)
            with torch.no_grad():
                # pred_measures_test, sparse_hm_test = encoder_HG(test_img)
                content_test = encoder_HG(test_img)
                pred_measures_test: UniformMeasure2D01 = UniformMeasure2DFactory.from_heatmap(content_test)
                heatmaper_256 = ToGaussHeatMap(256, 2.0)
                sparse_hm_test = heatmaper.forward(pred_measures_test.coord * 63)
                sparse_hm_test_1 = heatmaper_256.forward(pred_measures_test.coord * 255)

                latent_test = style_encoder(test_img)

                sparce_mask = sparse_hm_test_1.sum(dim=1, keepdim=True)
                sparce_mask[sparce_mask < 0.0003] = 0
                iwm = imgs_with_mask(test_img, sparce_mask)
                send_images_to_tensorboard(writer, iwm, "REAL", i)

                fake_img, _ = generator(sparse_hm_test, [sample_z])
                iwm = imgs_with_mask(fake_img, pred_measures_test.toImage(256))
                send_images_to_tensorboard(writer, iwm, "FAKE", i)

                restored = decoder(sparse_hm_test, latent_test)
                iwm = imgs_with_mask(restored, pred_measures_test.toImage(256))
                send_images_to_tensorboard(writer, iwm, "RESTORED", i)

                content_test_256 = nn.Upsample(scale_factor=4)(sparse_hm_test).sum(dim=1, keepdim=True).repeat(1, 3, 1,
                                                                                                               1) * \
                                   torch.tensor([1.0, 1.0, 0.0], device=device).view(1, 3, 1, 1)

                content_test_256 = (content_test_256 - content_test_256.min()) / content_test_256.max()
                send_images_to_tensorboard(writer, content_test_256, "HM", i, normalize=False, range=(0, 1))

        if i % 50 == 0 and i >= 0:
            test_loss = liuboff(encoder_HG)
            # test_loss = nadbka(encoder_HG)
            tuner.update(test_loss)
            writer.add_scalar("liuboff", test_loss, i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.module.state_dict(),
                    'd': discriminator.module.state_dict(),
                    'c': encoder_HG.module.state_dict(),
                    "s": style_encoder.state_dict()
                },
                f'{Paths.default.models()}/stylegan2_MAFL_{str(i + starting_model_number).zfill(6)}.pt',
            )
Example #19
from dataset.lazy_loader import LazyLoader, W300DatasetLoader, CelebaWithKeyPoints, Celeba
from sklearn.neighbors import NearestNeighbors
import numpy as np
import matplotlib.pyplot as plt
from dataset.toheatmap import ToGaussHeatMap
from dataset.probmeasure import UniformMeasure2D01
import pandas as pd
import networkx as nx
import ot
from barycenters.sampler import Uniform2DBarycenterSampler, Uniform2DAverageSampler
from parameters.path import Paths
from joblib import Parallel, delayed

N = 701
dataset = LazyLoader.cardio_landmarks(f"cardio_{N}/lm").dataset_train
D = np.load(f"{Paths.default.models()}/cardio_graph{N}.npy")
padding = 200
prob = np.ones(padding) / padding
NS = 1000


def LS(k):
    return dataset[k].numpy()


ls = np.asarray([LS(k) for k in range(N)])


def viz_mes(ms):
    heatmaper = ToGaussHeatMap(128, 1)
def train(generator, decoder, discriminator, encoder_HG, style_encoder, device,
          starting_model_number):
    latent_size = 512
    batch_size = 24
    sample_z = torch.randn(8, latent_size, device=device)
    Celeba.batch_size = batch_size
    test_img = next(LazyLoader.celeba().loader)[:8].cuda()

    loss_st: StyleGANLoss = StyleGANLoss(discriminator)
    model = CondStyleGanModel(generator, loss_st, (0.001, 0.0015))

    style_opt = optim.Adam(style_encoder.parameters(),
                           lr=5e-4,
                           betas=(0.9, 0.99))
    cont_opt = optim.Adam(encoder_HG.parameters(), lr=4e-5, betas=(0.5, 0.97))

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(
            albumentations.Compose([
                ResizeMask(h=256, w=256),
                albumentations.ElasticTransform(p=0.7,
                                                alpha=150,
                                                alpha_affine=1,
                                                sigma=10),
                albumentations.ShiftScaleRotate(p=0.7, rotate_limit=15),
                ResizeMask(h=64, w=64),
                NormalizeMask(dim=(0, 1, 2))
            ])),
        ToTensor(device),
    ])

    R_t = DualTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img: hm_svoego_roda_loss(
            encoder_HG(trans_dict['image']), trans_dict['mask'], 1000, 0.3))

    R_s = UnoTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img, ltnt: L1("R_s")
        (ltnt, style_encoder(trans_dict['image'])))

    barycenter: UniformMeasure2D01 = UniformMeasure2DFactory.load(
        f"{Paths.default.models()}/face_barycenter_68").cuda().batch_repeat(
            batch_size)
    # plt.imshow(barycenter.toImage(256)[0][0].detach().cpu().numpy())
    # plt.show()

    R_b = BarycenterRegularizer.__call__(barycenter, 1.0, 2.0, 3.0)

    #                  4.5, 1.2, 1.12, 1.4, 0.07, 2.2
    #                  1.27, 3.55, 5.88, 3.83, 2.17, 0.22, 1.72
    tuner = GoldTuner([2.2112, 2.3467, 3.8438, 3.2202, 2.0494, 0.0260, 5.8378],
                      device=device,
                      rule_eps=0.03,
                      radius=1,
                      active=True)
    # tuner_verka = GoldTuner([3.0, 1.2, 2.0], device=device, rule_eps=0.05, radius=1, active=True)

    best_igor = 100
    heatmaper = ToGaussHeatMap(64, 1.5)

    trainer_gan = gan_trainer(model, generator, decoder, encoder_HG,
                              style_encoder, R_s, style_opt, heatmaper,
                              g_transforms)
    content_trainer = content_trainer_with_gan(cont_opt, tuner, heatmaper,
                                               encoder_HG, R_b, R_t, model,
                                               generator)

    for i in range(100000):
        counter.update(i)

        requires_grad(encoder_HG, False)  # REMOVE BEFORE TRAINING
        real_img = next(LazyLoader.celeba().loader).to(device)

        img_content = encoder_HG(real_img).detach()
        pred_measures: UniformMeasure2D01 = UniformMeasure2DFactory.from_heatmap(
            img_content)
        sparce_hm = heatmaper.forward(pred_measures.coord * 63).detach()

        trainer_gan(i, real_img, img_content, sparce_hm)

        if i % 3 == 0:
            real_img = next(LazyLoader.celeba().loader).to(device)
            content_trainer(real_img)

        if i % 100 == 0:
            coefs = json.load(open("../parameters/content_loss_sup.json"))
            print(i, coefs)
            with torch.no_grad():

                content_test = encoder_HG(test_img)
                latent_test = style_encoder(test_img)
                pred_measures = UniformMeasure2DFactory.from_heatmap(
                    content_test)
                sparce_hm = heatmaper.forward(pred_measures.coord *
                                              63).detach()

                iwm = imgs_with_mask(test_img, pred_measures.toImage(256))
                send_images_to_tensorboard(writer, iwm, "REAL", i)

                fake_img, _ = generator(sparce_hm, [sample_z])
                iwm = imgs_with_mask(fake_img, pred_measures.toImage(256))
                send_images_to_tensorboard(writer, iwm, "FAKE", i)

                restored = decoder(sparce_hm, latent_test)
                iwm = imgs_with_mask(restored, pred_measures.toImage(256))
                send_images_to_tensorboard(writer, iwm, "RESTORED", i)

                content_test_256 = nn.Upsample(
                    scale_factor=4)(content_test).sum(dim=1, keepdim=True)
                content_test_256 = content_test_256 / content_test_256.max()
                send_images_to_tensorboard(writer, content_test_256, "HM", i)

        if i % 50 == 0 and i > 0:
            test_loss = verka(encoder_HG)
            tuner.update(test_loss)
            writer.add_scalar("verka", test_loss, i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.module.state_dict(),
                    'd': discriminator.module.state_dict(),
                    'c': encoder_HG.module.state_dict(),
                    "s": style_encoder.state_dict()
                },
                f'{Paths.default.models()}/stylegan2_new_{str(i + starting_model_number).zfill(6)}.pt',
            )
    generator.load_state_dict(weights['g'])
    # style_encoder.load_state_dict(weights['s'])
    encoder_HG.load_state_dict(weights['c'])

    generator = generator.cuda()
    # discriminator = discriminator.to(device)
    encoder_HG = encoder_HG.cuda()
    # style_encoder = style_encoder.cuda()
    decoder = CondGenDecode(generator)

    # generator = nn.DataParallel(generator, [0, 1, 3])
    # discriminator = nn.DataParallel(discriminator, [0, 1, 3])
    # encoder_HG = nn.DataParallel(encoder_HG, [0, 1, 3])
    # decoder = nn.DataParallel(decoder, [0, 1, 3])

    test_img = next(LazyLoader.celeba().loader)[:4].cuda()
    heatmaper = ToGaussHeatMap(64, 1.5)
    sample_z = torch.randn(4, 512, device=device)
    noise = mixing_noise(4, 512, 0.9, device)

    LazyLoader.w300_save = None
    W300DatasetLoader.test_batch_size = 4
    w300_test = next(iter(LazyLoader.w300().test_loader))
    w300_test_image = w300_test['data'].to(device)[:4]
    w300_test_mask = ProbabilityMeasureFabric(256).from_coord_tensor(
        w300_test["meta"]["keypts_normalized"][:4].to(device))
    sparse_hm = heatmaper.forward(w300_test_mask.coord * 63).detach()

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(
Example #22
from torchvision.utils import make_grid
from dataset.lazy_loader import LazyLoader, W300DatasetLoader, CelebaWithKeyPoints, Celeba
from sklearn.neighbors import NearestNeighbors
import numpy as np
import matplotlib.pyplot as plt
from dataset.toheatmap import ToGaussHeatMap
from dataset.probmeasure import UniformMeasure2D01
import pandas as pd
import networkx as nx
import ot
from barycenters.sampler import Uniform2DBarycenterSampler, Uniform2DAverageSampler, ImageBarycenterSampler
from parameters.path import Paths
from joblib import Parallel, delayed

N = 100
dataset = LazyLoader.w300().dataset_train
D = np.load(f"{Paths.default.models()}/w300graph{N}.npy")
padding = 68
prob = np.ones(padding) / padding
NS = 7000


def LS(k):
    return dataset[k]["meta"]['keypts_normalized'].numpy()


ls = []
images = []

for k in range(N):
    dk = dataset[k]
Example #23
def train(generator, discriminator, encoder, style_encoder, device,
          starting_model_number):

    batch = 32
    Celeba.batch_size = batch

    latent_size = 512
    model = CondStyleGanModel(generator, StyleGANLoss(discriminator),
                              (0.001, 0.0015))

    style_opt = optim.Adam(style_encoder.parameters(),
                           lr=5e-4,
                           betas=(0.5, 0.97))

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(
            albumentations.ElasticTransform(p=0.8,
                                            alpha=150,
                                            alpha_affine=1,
                                            sigma=10)),
        NumpyBatch(albumentations.ShiftScaleRotate(p=0.5, rotate_limit=10)),
        ToTensor(device)
    ])

    R_s = UnoTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img, ltnt: L1("R_s")
        (ltnt, style_encoder(trans_dict['image'])))

    sample_z = torch.randn(batch, latent_size, device=device)
    test_img = next(LazyLoader.celeba().loader).to(device)
    print(test_img.shape)
    test_cond = encoder(test_img)

    requires_grad(encoder, False)  # REMOVE BEFORE TRAINING

    t_start = time.time()

    for i in range(100000):
        counter.update(i)
        real_img = next(LazyLoader.celeba().loader).to(device)

        img_content = encoder(real_img).detach()

        noise = mixing_noise(batch, latent_size, 0.9, device)
        fake, _ = generator(img_content, noise)

        model.discriminator_train([real_img], [fake.detach()], img_content)

        writable("Generator loss", model.generator_loss)([real_img], [fake], [], img_content)\
            .minimize_step(model.optimizer.opt_min)

        # print("gen train", time.time() - t1)

        if i % 5 == 0 and i > 0:
            noise = mixing_noise(batch, latent_size, 0.9, device)

            img_content = encoder(real_img).detach()
            fake, fake_latent = generator(img_content,
                                          noise,
                                          return_latents=True)

            fake_latent_test = fake_latent[:, [0, 13], :].detach()
            fake_latent_pred = style_encoder(fake)
            fake_content_pred = encoder(fake)

            restored = generator.module.decode(
                img_content[:batch // 2], style_encoder(real_img[:batch // 2]))
            (HMLoss("BCE content gan", 5000)(fake_content_pred, img_content) +
             L1("L1 restored")(restored, real_img[:batch // 2]) * 50 +
             L1("L1 style gan")(fake_latent_pred, fake_latent_test) * 30 +
             R_s(fake.detach(), fake_latent_pred) * 50).minimize_step(
                 model.optimizer.opt_min, style_opt)

        if i % 100 == 0:
            t_100 = time.time()
            print(i, t_100 - t_start)
            t_start = time.time()
            with torch.no_grad():

                fake_img, _ = generator(test_cond, [sample_z])
                coords, p = heatmap_to_measure(test_cond)
                test_mes = ProbabilityMeasure(p, coords)
                iwm = imgs_with_mask(fake_img, test_mes.toImage(256))
                send_images_to_tensorboard(writer, iwm, "FAKE", i)

                iwm = imgs_with_mask(test_img, test_mes.toImage(256))
                send_images_to_tensorboard(writer, iwm, "REAL", i)

                restored = generator.module.decode(test_cond,
                                                   style_encoder(test_img))
                send_images_to_tensorboard(writer, restored, "RESTORED", i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.state_dict(),
                    'd': discriminator.state_dict(),
                    'style': style_encoder.state_dict()
                    # 'enc': cont_style_encoder.state_dict(),
                },
                f'/trinity/home/n.buzun/PycharmProjects/saved/stylegan2_w300_{str(starting_model_number + i).zfill(6)}.pt',
            )
Example #24
hg = HG_heatmap(heatmapper, num_blocks=1, num_classes=200)
hg.load_state_dict(weights['gh'])
hg = hg.cuda()
cont_opt = optim.Adam(hg.parameters(), lr=2e-5, betas=(0, 0.8))

gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator,
                                               StyleGANLoss(discriminator_img),
                                               (0.001 / 4, 0.0015 / 4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(
    f"{Paths.default.board()}/brule1_cardio_{int(time.time())}")
WR.writer = writer

batch = next(LazyLoader.cardio().loader_train_inf)

batch_test = next(iter(LazyLoader.cardio().test_loader))
test_img = batch["image"].cuda()

test_landmarks = batch["keypoints"].cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=10000, was_coef=100)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)
Example #25
from dataset.lazy_loader import LazyLoader, W300DatasetLoader, CelebaWithKeyPoints, Celeba
from sklearn.neighbors import NearestNeighbors
import numpy as np
import matplotlib.pyplot as plt
from dataset.toheatmap import ToGaussHeatMap
from dataset.probmeasure import UniformMeasure2D01
import pandas as pd
import networkx as nx
import ot
from barycenters.sampler import Uniform2DBarycenterSampler, Uniform2DAverageSampler
from parameters.path import Paths
from joblib import Parallel, delayed

N = 300
dataset = LazyLoader.cardio().dataset_train
padding = 200
prob = np.ones(padding) / padding

data_ids = np.random.permutation(np.arange(0, 700))[0:N]


def LS(k):
    return dataset[k]['keypoints'].numpy()


ls = [LS(k) for k in data_ids]

bc_sampler = Uniform2DBarycenterSampler(padding, dir_alpha=1.0)

bc = bc_sampler.mean(ls)
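Uniform2DBarycenterSampler.mean above returns a Wasserstein barycenter of the sampled landmark sets; its internals are not shown in this snippet. The same quantity can be approximated directly with POT's free-support barycenter solver, sketched here as an alternative under that assumption (support size taken from the padding used above):

import numpy as np
import ot

def w2_barycenter(point_sets, support_size=200, iters=100):
    # Each element of point_sets is a (k, 2) array of landmark coordinates.
    weights = [np.ones(len(p)) / len(p) for p in point_sets]
    x_init = np.random.rand(support_size, 2)
    return ot.lp.free_support_barycenter(point_sets, weights, x_init, numItermax=iters)

# bc_alt = w2_barycenter(ls, support_size=padding)  # comparable to bc_sampler.mean(ls)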
Example #26
File: celeba.py  Project: nazarblch/brule2
discriminator = discriminator.cuda()

gan_model = StyleGanModel(generator, StyleGANLoss(discriminator),
                          (0.001, 0.0015))
gan_accumulator = Accumulator(generator, decay=0.99, write_every=100)

writer = SummaryWriter(f"{Paths.default.board()}/celeba{int(time.time())}")

print("Starting Training Loop...")
starting_model_number = 0

for i in range(300000):

    print(i)

    real_img = next(LazyLoader.celeba().loader).to(device)

    noise: List[Tensor] = mixing_noise(batch_size, noise_size, 0.9, device)
    fake, latent = generator.forward(noise, return_latents=True)

    gan_model.discriminator_train([real_img], [fake.detach()])
    gan_model.generator_loss_with_penalty(
        [real_img], [fake], latent).minimize_step(gan_model.optimizer.opt_min)
    # gan_model.generator_loss([real_img], [fake]).minimize_step(gan_model.optimizer.opt_min)

    gan_accumulator.step(i)

    if i % 100 == 0:
        print(i)
        with torch.no_grad():
            fake_test, _ = generator.forward([test_sample_z])
Example #27
    weights = torch.load(
        f'{Paths.default.models()}/stylegan2_new_{str(starting_model_number).zfill(6)}.pt',
        # f'{Paths.default.nn()}/stylegan2_w300_{str(starting_model_number).zfill(6)}.pt',
        map_location="cpu")

    generator.load_state_dict(weights['g'])
    style_encoder.load_state_dict(weights['s'])
    encoder_HG.load_state_dict(weights['e'])

    encoder_ema.storage_model.load_state_dict(weights['e'])

    generator = generator.cuda()
    encoder_HG = encoder_HG.cuda()
    style_encoder = style_encoder.cuda()
    decoder = ConditionalDecoder(generator)

    test_img = next(LazyLoader.w300().loader_train_inf)["data"][:8].cuda()

    with torch.no_grad():
        # pred_measures_test, sparse_hm_test = encoder_HG(test_img)
        encoded_test = encoder_HG(test_img)
        pred_measures_test: UniformMeasure2D01 = UniformMeasure2D01(
            encoded_test["coords"])
        heatmaper_256 = ToGaussHeatMap(256, 1.0)
        sparse_hm_test_1 = heatmaper_256.forward(pred_measures_test.coord)

        latent_test = style_encoder(test_img)

        sparce_mask = sparse_hm_test_1.sum(dim=1, keepdim=True)
        sparce_mask[sparce_mask < 0.0003] = 0
        iwm = imgs_with_mask(test_img, sparce_mask)
        send_images_to_tensorboard(WR.writer, iwm, "REAL", i)
Example #28
def train(generator, decoder, discriminator, discriminatorHG, skeleton_encoder, style_encoder, device, starting_model_number):
    latent_size = 512
    batch_size = 12
    sample_z = torch.randn(8, latent_size, device=device)
    Celeba.batch_size = batch_size
    W300DatasetLoader.batch_size = batch_size
    W300DatasetLoader.test_batch_size = 16

    encoder_HG_supervised = IXHG(num_classes=68, heatmap_size=64)
    encoder_HG_supervised.load_state_dict(torch.load(f'{Paths.default.models()}/hg2_e29.pt', map_location="cpu"))
    encoder_HG_supervised = encoder_HG_supervised.cuda()

    requires_grad(encoder_HG_supervised, False)

    style_opt = optim.Adam(style_encoder.parameters(), lr=5e-4, betas=(0.9, 0.99))

    test_img = next(LazyLoader.celeba().loader)[:8].cuda()

    loss_st: StyleGANLoss = StyleGANLoss(discriminator)
    # model = CondStyleGanModel(generator, loss_st, (0.001, 0.0015))
    #
    loss_st2: StyleGANLoss = StyleGANLoss(discriminatorHG)
    model2 = CondStyleGanModel(skeleton_encoder, loss_st2, (0.0001, 0.001))

    skeletoner = CoordToGaussSkeleton(size, 4)

    # tuda_trainer = gan_tuda_trainer(model, generator)
    obratno_trainer = gan_obratno_trainer(model2)
    # tuda_obratno_trainer = gan_tuda_obratno_trainer(generator, encoder_HG, decoder, style_encoder, style_opt)

    for i in range(100000):
        counter.update(i)

        # requires_grad(encoder_HG, False)  # REMOVE BEFORE TRAINING
        real_img = next(LazyLoader.celeba().loader).to(device)
        heatmap = encoder_HG_supervised.get_heatmaps(real_img).detach()
        coords, p = heatmap_to_measure(heatmap)
        skeleton = skeletoner.forward(coords).sum(dim=1, keepdim=True)

        # tuda_trainer(real_img, heatmap)
        obratno_trainer(real_img, skeleton)
        # tuda_obratno_trainer(real_img, heatmap)

        # if i % 10 == 0:
        #     accumulate(encoder_ema, encoder_HG.module, 0.95)

        # pred_hm = encoder_HG(real_img)
        # stariy_hm_loss(pred_hm, heatmap).minimize_step(model2.optimizer.opt_min)

        if i % 100 == 0 and i >= 0:
            with torch.no_grad():

                content_test = skeleton_encoder(test_img)["skeleton"].sum(dim=1, keepdim=True)

                iwm = imgs_with_mask(test_img, content_test, border=0.1)
                send_images_to_tensorboard(writer, iwm, "REAL", i)

                # fake_img, _ = generator(sparse_hm_test, [sample_z])
                # iwm = imgs_with_mask(fake_img, pred_measures_test.toImage(256))
                # send_images_to_tensorboard(writer, iwm, "FAKE", i)
                #
                # restored = decoder(sparse_hm_test, latent_test)
                # iwm = imgs_with_mask(restored, pred_measures_test.toImage(256))
                # send_images_to_tensorboard(writer, iwm, "RESTORED", i)

                content_test_hm = (content_test - content_test.min()) / content_test.max()

                send_images_to_tensorboard(writer, content_test_hm, "HM", i, normalize=False, range=(0, 1))

                heatmap_test = encoder_HG_supervised.get_heatmaps(test_img).detach()
                coords_test, _ = heatmap_to_measure(heatmap_test)
                skeleton_test = skeletoner.forward(coords_test).sum(dim=1, keepdim=True)
                iwm = imgs_with_mask(test_img, skeleton_test, border=0.1)
                send_images_to_tensorboard(writer, iwm, "REF", i)


        if i % 50 == 0 and i >= 0:
            test_loss = liuboff(skeleton_encoder)
            print("liuboff", test_loss)
            # test_loss = nadbka(encoder_HG)
            # tuner.update(test_loss)
            writer.add_scalar("liuboff", test_loss, i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.module.state_dict(),
                    'd': discriminator.module.state_dict(),
                    'c': skeleton_encoder.module.state_dict(),
                    "s": style_encoder.state_dict(),
                    "d2": discriminatorHG.module.state_dict(),
                    # "ema": encoder_ema.state_dict()
                },
                f'{Paths.default.models()}/cyclegan_{str(i + starting_model_number).zfill(6)}.pt',
            )
Example #29
import sys, os

from dataset.d300w import ThreeHundredW
import ot

from dataset.lazy_loader import Cardio, LazyLoader
from parameters.path import Paths
import numpy as np
from torch.utils.data import Subset

image_size = 256
padding = 200
N = 300
prob = np.ones(padding) / padding

dataset_train = LazyLoader.cardio_landmarks("cardio_300/lm").dataset_train


def load_landmarks(k):
    return dataset_train[k].numpy()


landmarks = [load_landmarks(k) for k in range(N)]


def compute_w2(i, j):
    M_ij = ot.dist(landmarks[i], landmarks[j])
    D_ij = ot.emd2(prob, prob, M_ij)
    return D_ij
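compute_w2 returns the exact optimal-transport cost between two landmark sets via the EMD solver. The pairwise distance matrices loaded elsewhere in these examples (e.g. cardio_graph{N}.npy) are presumably assembled from such calls; a short sketch of building that matrix in parallel, using joblib as in the related snippets (an assumption about how the file was produced, relying on compute_w2 and the numpy import above):

from joblib import Parallel, delayed

def pairwise_w2_matrix(n):
    # Compute the upper triangle in parallel and mirror it; the diagonal is zero.
    pairs = [(i, j) for i in range(n) for j in range(i + 1, n)]
    vals = Parallel(n_jobs=-1)(delayed(compute_w2)(i, j) for i, j in pairs)
    D = np.zeros((n, n))
    for (i, j), d in zip(pairs, vals):
        D[i, j] = D[j, i] = d
    return D

# D = pairwise_w2_matrix(N)
# np.save(f"{Paths.default.models()}/cardio_graph{N}.npy", D)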

    ToTensor(device)
])

R_s = UnoTransformRegularizer.__call__(
    g_transforms, lambda trans_dict, img, ltnt: Loss(nn.L1Loss()(
        ltnt, style_encoder(trans_dict['image']))))

W300DatasetLoader.batch_size = 24
W300DatasetLoader.test_batch_size = 64
Celeba.batch_size = 24

heatmaper = ToHeatMap(64)

# tuner = GoldTuner([1.0, 1.0], device=device, rule_eps=0.02, radius=0.5, active=True)

w300_test = next(iter(LazyLoader.w300().test_loader))
w300_test_image = w300_test['data'].to(device)[:8]


def hm_svoego_roda_loss(pred, target, coef=1.0, l1_coef=0.0):
    pred_mes = UniformMeasure2DFactory.from_heatmap(pred)
    target_mes = UniformMeasure2DFactory.from_heatmap(target)

    # pred = pred.relu() + 1e-15
    # target[target < 1e-7] = 0
    # target[target > 1 - 1e-7] = 1

    if torch.isnan(pred).any() or torch.isnan(target).any():
        print("nan in hm")
        return Loss.ZERO()