Code example #1
def liuboff(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.w300().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        landmarks[landmarks > 1] = 0.99999
        # content = heatmap_to_measure(encoder(data))[0]
        pred_measure = UniformMeasure2D01(encoder(data)["coords"])
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        eye_dist = landmarks[:, 45] - landmarks[:, 36]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        sum_loss += (handmadew1(pred_measure, target) / eye_dist).sum().item()
    return sum_loss / len(LazyLoader.w300().test_dataset)
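Note: the function above computes the standard inter-ocular normalized error used on 300-W; the per-image landmark error (here a Wasserstein-style distance from `handmadew1`) is divided by the distance between the outer eye corners, which are points 36 and 45 in the 68-point annotation. A minimal, self-contained sketch of the same normalization with a plain per-point L2 error; the tensors and the `nme_interocular` helper are illustrative, not part of the repository:

import torch

def nme_interocular(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # pred, target: (B, 68, 2) landmark coordinates normalized to [0, 1].
    # Points 36 and 45 are the outer eye corners in the 68-point 300-W scheme.
    eye_dist = (target[:, 45] - target[:, 36]).pow(2).sum(dim=1).sqrt()   # (B,)
    per_point = (pred - target).pow(2).sum(dim=2).sqrt()                  # (B, 68)
    return per_point.mean(dim=1) / eye_dist                               # (B,)

# usage with random tensors
print(nme_interocular(torch.rand(4, 68, 2), torch.rand(4, 68, 2)).mean().item())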
Code example #2
    def do_train():

        requires_grad(encoder_HG, True)
        w300_batch = next(loader)
        w300_image = w300_batch['data'].cuda()
        landmarks = w300_batch["meta"]["keypts_normalized"].cuda()
        w300_mes = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        pred_coord = encoder_HG(w300_image)["coords"]
        pred_mes = UniformMeasure2D01(pred_coord)

        coefs = json.load(open("../parameters/content_loss.json"))

        WR.writable("W300 Loss", sup_loss)(pred_mes, w300_mes).__mul__(coefs["borj4_w300"])\
            .minimize_step(cont_opt)
Code example #3
    def do_train(real_img):

        B = real_img.shape[0]

        requires_grad(encoder_HG, True)
        requires_grad(decoder, False)

        coefs = json.load(open("../parameters/content_loss.json"))
        encoded = encoder_HG(real_img)
        pred_measures: UniformMeasure2D01 = UniformMeasure2D01(encoded["coords"])

        heatmap_content = heatmapper.forward(encoded["coords"]).detach()

        restored = decoder(encoded["skeleton"], style_encoder(real_img))

        noise = mixing_noise(B, C, 0.9, real_img.device)
        fake, _ = generator(encoded["skeleton"], noise)
        fake_content = encoder_HG(fake.detach())["coords"]

        ll = (
                WR.writable("R_b", R_b.__call__)(real_img, pred_measures) * coefs["R_b"] +
                WR.writable("R_t", R_t.__call__)(real_img, heatmap_content) * coefs["R_t"] +
                WR.L1("L1 image")(restored, real_img) * coefs["L1 image"] +
                WR.writable("fake_content loss", coord_hm_loss)(
                    fake_content, heatmap_content
                ) * coefs["fake_content loss"] +
                WR.writable("Fake-content D", model.loss.generator_loss)(
                    real=None,
                    fake=[fake, encoded["skeleton"].detach()]) * coefs["Fake-content D"]
        )

        ll.minimize_step(cont_opt)
Code example #4
def liuboffMAFL(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.mafl().test_loader):
        data = batch['data'].cuda()
        landmarks = batch["meta"]["keypts_normalized"].cuda()
        landmarks[landmarks > 1] = 0.99999

        pred_measure = UniformMeasure2D01(encoder(data)["coords"])
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))

        eye_dist = landmarks[:, 1] - landmarks[:, 0]  # indices 0 and 1 are the eye centers in the 5-point MAFL annotation
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()

        sum_loss += (handmadew1(pred_measure, target, 0.005) /
                     eye_dist).sum().item()

    return sum_loss / len(LazyLoader.mafl().test_dataset)
Code example #5
def verka(encoder: nn.Module):
    res = []
    for i, (image, lm) in enumerate(LazyLoader.celeba_test(64)):
        content = encoder(image.cuda())
        mes = UniformMeasure2D01(lm.cuda())
        pred_measures: UniformMeasure2D01 = UniformMeasure2DFactory.from_heatmap(content)
        res.append(Samples_Loss(p=1)(mes, pred_measures).item() * image.shape[0])
    return np.mean(res)/len(LazyLoader.celeba_test(1).dataset)
Code example #6
File: cfinder.py  Project: nazarblch/brule2
    def get_conturs_batch(mask: Tensor):

        B = mask.shape[0]
        mask = mask.cpu().numpy()

        coord = torch.cat([
            torch.from_numpy(ContFinder.get_contours(mask[i]))[None, ]
            for i in range(B)
        ]).cuda()
        return UniformMeasure2D01(coord / (mask.shape[-1] - 1))
Code example #7
    def forward(self, image: Tensor):
        B, C, D, D = image.shape
        heatmaps: List[Tensor] = self.model.forward(image)
        out = heatmaps[-1]

        coords = self.hm_to_coord(out)
        sk = self.skeletoner.forward(coords).sum(dim=1, keepdim=True)

        assert coords.max().item() is not None
        assert coords.max().item() < 2

        return {"mes": UniformMeasure2D01(coords), "skeleton": sk}
Code example #8
    def forward(self, image: Tensor):
        B, C, D, D = image.shape
        heatmaps: List[Tensor] = self.model.forward(image)
        out = heatmaps[-1]

        hm = self.up(self.postproc(out))
        coords, _ = heatmap_to_measure(hm)

        return {
            "out": out,
            "mes": UniformMeasure2D01(coords),
            'hm': hm,
            "softmax": self.postproc(out)
        }
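`heatmap_to_measure` is not shown in these excerpts; a common way to turn a per-keypoint heatmap into 2D coordinates is a soft-argmax, i.e. the expectation of the pixel grid under the normalized heatmap. A minimal sketch of that technique, assuming (B, K, H, W) heatmaps and (x, y) coordinates in [0, 1]; it illustrates the general idea rather than the repository's exact implementation:

import torch

def soft_argmax_2d(hm: torch.Tensor) -> torch.Tensor:
    # hm: (B, K, H, W) heatmaps -> (B, K, 2) coordinates in [0, 1].
    B, K, H, W = hm.shape
    probs = hm.flatten(2).softmax(dim=-1).view(B, K, H, W)
    ys = torch.linspace(0, 1, H, device=hm.device)
    xs = torch.linspace(0, 1, W, device=hm.device)
    y = (probs.sum(dim=3) * ys).sum(dim=2)   # expectation over rows
    x = (probs.sum(dim=2) * xs).sum(dim=2)   # expectation over columns
    return torch.stack([x, y], dim=-1)

print(soft_argmax_2d(torch.rand(2, 68, 64, 64)).shape)  # torch.Size([2, 68, 2])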
Code example #9
def liuboff(encoder: nn.Module):
    sum_loss = 0
    for i, batch in enumerate(LazyLoader.mafl().test_loader):
        data = batch['data'].to(device)
        landmarks = batch["meta"]["keypts_normalized"].cuda().type(dtype=torch.float32)
        landmarks[landmarks > 1] = 0.99999
        # content = heatmap_to_measure(encoder(data))[0]
        pred_measure = UniformMeasure2DFactory.from_heatmap(encoder(data))
        target = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        eye_dist = landmarks[:, 1] - landmarks[:, 0]
        eye_dist = eye_dist.pow(2).sum(dim=1).sqrt()
        # w1_loss = (handmadew1(pred_measure, target) / eye_dist).sum().item()
        # l1_loss = ((pred_measure.coord - target.coord).pow(2).sum(dim=2).sqrt().mean(dim=1) / eye_dist).sum().item()
        # print(w1_loss, l1_loss)
        sum_loss += ((pred_measure.coord - target.coord).pow(2).sum(dim=2).sqrt().mean(dim=1) / eye_dist).sum().item()
    return sum_loss / len(LazyLoader.mafl().test_dataset)
Code example #10
    def forward(self, image: Tensor):
        B, C, D, D = image.shape
        heatmaps: List[Tensor] = self.model.forward(image)
        out = heatmaps[-1]

        coords = self.hm_to_coord(out)
        hm = self.heatmapper.forward(coords)

        # hm = self.up(self.postproc(out))
        # coords, _ = heatmap_to_measure(hm)
        # hm_g = self.heatmapper.forward(coords)

        assert coords.max().item() is not None
        assert coords.max().item() < 2

        return {
            "mes": UniformMeasure2D01(coords),
            "hm": hm,
            "hm_sum": hm.sum(dim=1, keepdim=True),
            "hm_g": None,
            "hm_g_sum": None,
        }
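The opposite direction, rendering a Gaussian heatmap from normalized coordinates, is what `ToGaussHeatMap` / `self.heatmapper` appears to do in these examples. A minimal sketch under the same assumptions (coordinates in [0, 1], square output); the size and sigma values are illustrative, not the repository's defaults:

import torch

def coords_to_gauss_heatmap(coords: torch.Tensor, size: int = 64, sigma: float = 1.0) -> torch.Tensor:
    # coords: (B, K, 2) in [0, 1] -> (B, K, size, size) Gaussian bumps centered at each point.
    grid = torch.arange(size, dtype=torch.float32, device=coords.device)
    ys, xs = torch.meshgrid(grid, grid, indexing="ij")
    cx = coords[..., 0] * (size - 1)  # map [0, 1] back to pixel units
    cy = coords[..., 1] * (size - 1)
    dist2 = (xs - cx[..., None, None]) ** 2 + (ys - cy[..., None, None]) ** 2
    return torch.exp(-dist2 / (2 * sigma ** 2))

print(coords_to_gauss_heatmap(torch.rand(2, 68, 2)).shape)  # torch.Size([2, 68, 64, 64])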
Code example #11
def train_content(cont_opt, R_b, R_t, real_img, model, encoder_HG, decoder, generator, style_encoder):

    B = real_img.shape[0]
    C = 512

    heatmapper = ToGaussHeatMap(256, 1)

    requires_grad(encoder_HG, True)

    coefs = json.load(open("../parameters/content_loss.json"))
    encoded = encoder_HG(real_img)
    pred_measures: UniformMeasure2D01 = UniformMeasure2D01(encoded["coords"])

    heatmap_content = heatmapper.forward(encoded["coords"]).detach()


    ll = (
        WR.writable("R_b", R_b.__call__)(real_img, pred_measures) * coefs["R_b"] +
        WR.writable("R_t", R_t.__call__)(real_img, heatmap_content) * coefs["R_t"]
    )

    ll.minimize_step(cont_opt)
Code example #12
def train(generator, decoder, discriminator, encoder_HG, style_encoder, device,
          starting_model_number):
    latent_size = 512
    batch_size = 8
    sample_z = torch.randn(8, latent_size, device=device)
    Celeba.batch_size = batch_size
    W300DatasetLoader.batch_size = batch_size
    W300DatasetLoader.test_batch_size = 16

    test_img = next(LazyLoader.w300().loader_train_inf)["data"][:8].cuda()

    model = CondStyleGanModel(generator, StyleGANLoss(discriminator),
                              (0.001 / 4, 0.0015 / 4))

    style_opt = optim.Adam(style_encoder.parameters(),
                           lr=5e-4,
                           betas=(0.9, 0.99))
    cont_opt = optim.Adam(encoder_HG.parameters(), lr=3e-5, betas=(0.5, 0.97))

    g_transforms: albumentations.DualTransform = albumentations.Compose([
        ToNumpy(),
        NumpyBatch(
            albumentations.Compose([
                albumentations.ElasticTransform(p=0.7,
                                                alpha=150,
                                                alpha_affine=1,
                                                sigma=10),
                albumentations.ShiftScaleRotate(p=0.9, rotate_limit=15),
            ])),
        ToTensor(device),
    ])

    g_transforms_without_norm: albumentations.DualTransform = albumentations.Compose(
        [
            ToNumpy(),
            NumpyBatch(
                albumentations.Compose([
                    albumentations.ElasticTransform(p=0.3,
                                                    alpha=150,
                                                    alpha_affine=1,
                                                    sigma=10),
                    albumentations.ShiftScaleRotate(p=0.7, rotate_limit=15),
                ])),
            ToTensor(device),
        ])

    R_t = DualTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img: coord_hm_loss(
            encoder_HG(trans_dict['image'])["coords"], trans_dict['mask']))

    R_s = UnoTransformRegularizer.__call__(
        g_transforms, lambda trans_dict, img, ltnt: WR.L1("R_s")
        (ltnt, style_encoder(trans_dict['image'])))

    barycenter: UniformMeasure2D01 = UniformMeasure2DFactory.load(
        f"{Paths.default.models()}/face_barycenter_68").cuda().batch_repeat(
            batch_size)

    R_b = BarycenterRegularizer.__call__(barycenter, 1.0, 2.0, 4.0)

    tuner = GoldTuner([0.37, 2.78, 0.58, 1.43, 3.23],
                      device=device,
                      rule_eps=0.001,
                      radius=0.3,
                      active=False)

    trainer_gan = gan_trainer(model, generator, decoder, encoder_HG,
                              style_encoder, R_s, style_opt, g_transforms)
    content_trainer = content_trainer_with_gan(cont_opt, tuner, encoder_HG,
                                               R_b, R_t, model, generator,
                                               g_transforms, decoder,
                                               style_encoder)
    supervise_trainer = content_trainer_supervised(
        cont_opt, encoder_HG,
        LazyLoader.w300().loader_train_inf)

    for i in range(11000):
        WR.counter.update(i)

        requires_grad(encoder_HG, False)
        real_img = next(LazyLoader.celeba().loader).to(device)

        encoded = encoder_HG(real_img)
        internal_content = encoded["skeleton"].detach()

        trainer_gan(i, real_img, internal_content)
        # content_trainer(real_img)
        train_content(cont_opt, R_b, R_t, real_img, model, encoder_HG, decoder,
                      generator, style_encoder)
        supervise_trainer()

        encoder_ema.accumulate(encoder_HG.module, i, 0.98)
        if i % 50 == 0 and i > 0:
            encoder_ema.write_to(encoder_HG.module)

        if i % 100 == 0:
            coefs = json.load(open("../parameters/content_loss.json"))
            print(i, coefs)
            with torch.no_grad():

                # pred_measures_test, sparse_hm_test = encoder_HG(test_img)
                encoded_test = encoder_HG(test_img)
                pred_measures_test: UniformMeasure2D01 = UniformMeasure2D01(
                    encoded_test["coords"])
                heatmaper_256 = ToGaussHeatMap(256, 1.0)
                sparse_hm_test_1 = heatmaper_256.forward(
                    pred_measures_test.coord)

                latent_test = style_encoder(test_img)

                sparce_mask = sparse_hm_test_1.sum(dim=1, keepdim=True)
                sparce_mask[sparce_mask < 0.0003] = 0
                iwm = imgs_with_mask(test_img, sparce_mask)
                send_images_to_tensorboard(WR.writer, iwm, "REAL", i)

                fake_img, _ = generator(encoded_test["skeleton"], [sample_z])
                iwm = imgs_with_mask(fake_img, pred_measures_test.toImage(256))
                send_images_to_tensorboard(WR.writer, iwm, "FAKE", i)

                restored = decoder(encoded_test["skeleton"], latent_test)
                iwm = imgs_with_mask(restored, pred_measures_test.toImage(256))
                send_images_to_tensorboard(WR.writer, iwm, "RESTORED", i)

                content_test_256 = (encoded_test["skeleton"]).repeat(1, 3, 1, 1) * \
                    torch.tensor([1.0, 1.0, 0.0], device=device).view(1, 3, 1, 1)

                content_test_256 = (content_test_256 - content_test_256.min()
                                    ) / content_test_256.max()
                send_images_to_tensorboard(WR.writer,
                                           content_test_256,
                                           "HM",
                                           i,
                                           normalize=False,
                                           range=(0, 1))

        if i % 50 == 0 and i >= 0:
            test_loss = liuboff(encoder_HG)
            print("liuboff", test_loss)
            # test_loss = nadbka(encoder_HG)
            tuner.update(test_loss)
            WR.writer.add_scalar("liuboff", test_loss, i)

        if i % 10000 == 0 and i > 0:
            torch.save(
                {
                    'g': generator.module.state_dict(),
                    'd': discriminator.module.state_dict(),
                    'c': encoder_HG.module.state_dict(),
                    "s": style_encoder.state_dict(),
                    "e": encoder_ema.storage_model.state_dict()
                },
                f'{Paths.default.models()}/stylegan2_new_{str(i + starting_model_number).zfill(6)}.pt',
            )
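The `encoder_ema.accumulate(...)` / `write_to(...)` calls above, like the `Accumulator(..., decay=0.99, write_every=100)` objects in the next examples, maintain an exponential moving average of model weights and periodically copy it back. A minimal sketch of such a parameter EMA, using a hypothetical `SimpleEMA` helper rather than the repository's class:

import copy
import torch
import torch.nn as nn

class SimpleEMA:
    # Keeps an exponential moving average (EMA) of a model's parameters.
    def __init__(self, model: nn.Module, decay: float = 0.99):
        self.shadow = copy.deepcopy(model).eval()
        self.decay = decay
        for p in self.shadow.parameters():
            p.requires_grad_(False)

    def accumulate(self, model: nn.Module):
        with torch.no_grad():
            for ema_p, p in zip(self.shadow.parameters(), model.parameters()):
                ema_p.mul_(self.decay).add_(p, alpha=1 - self.decay)

    def write_to(self, model: nn.Module):
        with torch.no_grad():
            for ema_p, p in zip(self.shadow.parameters(), model.parameters()):
                p.copy_(ema_p)

# usage: accumulate every step, periodically write the averaged weights back
net = nn.Linear(4, 4)
ema = SimpleEMA(net, decay=0.98)
ema.accumulate(net)
ema.write_to(net)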
Code example #13
# image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)


for i in range(100000):

    WR.counter.update(i)

    batch = next(LazyLoader.cardio().loader_train_inf)
    real_img = batch["image"].cuda()
    train_landmarks = batch["keypoints"].cuda()

    coefs = json.load(open(os.path.join(sys.path[0], "../parameters/cycle_loss_2.json")))

    WR.writable("sup", mes_loss.forward)(hg.forward(real_img)["mes"], UniformMeasure2D01(train_landmarks)).__mul__(coefs["sup"]) \
        .minimize_step(cont_opt)

    hm_accumulator.step(i)

    if i % 1000 == 0 and i > 0:
        torch.save(
            {
                'gh': hg.state_dict(),
            },
            f'{Paths.default.models()}/cardio_brule_sup_{str(i + starting_model_number).zfill(6)}.pt',
        )


    if i % 100 == 0:
        print(i)
Code example #14
gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001/4, 0.0015/4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg, StyleGANLoss(hm_discriminator, r1=3), (2e-5, 0.0015/4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

print(f"board path: {Paths.default.board()}/cardio{int(time.time())}")
writer = SummaryWriter(f"{Paths.default.board()}/cardio{int(time.time())}")
WR.writer = writer

#%%

test_batch = next(LazyLoader.cardio().loader_train_inf)
test_img = test_batch["image"].cuda()
test_landmarks = next(LazyLoader.cardio_landmarks(args.data_path).loader_train_inf).cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=100000, was_coef=2000)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

for i in range(100000):

    WR.counter.update(i)

    try:
        real_img = next(LazyLoader.cardio().loader_train_inf)["image"].cuda()
Code example #15
File: hm2img.py  Project: nazarblch/brule2
    fake_latent_pred = enc_dec.encode_latent(fake)

    gan_model_tuda.discriminator_train([real_img], [fake.detach()])
    (
        gan_model_tuda.generator_loss([real_img], [fake]) +
        l1_loss(fake_latent_pred, fake_latent) * coefs["style"]
    ).minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

    hm_pred = hg.forward(real_img)["hm_sum"]
    hm_ref = heatmapper.forward(landmarks).detach().sum(1, keepdim=True)
    gan_model_obratno.discriminator_train([hm_ref], [hm_pred.detach()])
    gan_model_obratno.generator_loss([hm_ref], [hm_pred]).__mul__(coefs["obratno"])\
        .minimize_step(gan_model_obratno.optimizer.opt_min)

    fake2, _ = enc_dec.generate(heatmap_sum)
    WR.writable("cycle", mes_loss.forward)(hg.forward(fake2)["mes"], UniformMeasure2D01(landmarks)).__mul__(coefs["hm"]) \
        .minimize_step(gan_model_tuda.optimizer.opt_min, gan_model_obratno.optimizer.opt_min)

    latent = enc_dec.encode_latent(g_transforms(image=real_img)["image"])
    restored = enc_dec.decode(hg.forward(real_img)["hm_sum"], latent)
    WR.writable("cycle2", psp_loss.forward)(real_img, real_img, restored, latent).__mul__(coefs["img"])\
        .minimize_step(gan_model_tuda.optimizer.opt_min, gan_model_obratno.optimizer.opt_min, style_opt)

    image_accumulator.step(i)
    hm_accumulator.step(i)

    if i % 10000 == 0 and i > 0:
        torch.save(
            {
                'gi': enc_dec.generator.state_dict(),
                'gh': hg.state_dict(),
Code example #16
        bc_net.zero_grad()
        ll = (bc_net(ws).reshape(-1, padding, 2) - bs).pow(2).sum() / 32
        print(ll.item())
        lll = ll.item()
        ll.backward()
        bc_net_opt.step()

    # replay_buf = replay_buf[replay_buf.__len__() - 32:]

    barycenter.coord = bc_net(weights[None, :]).reshape(1, padding, 2).detach()
    return barycenter, lll
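This excerpt appears to approximate a weighted Wasserstein barycenter of the landmark measures: for measures mu_1, ..., mu_n and Dirichlet-sampled weights w, the barycenter is mu* = argmin_mu sum_i w_i * W2(mu, mu_i)^2, and `bc_net` is trained to map the weight vector directly to the barycenter's coordinates (with `bs` holding the regression targets). This reading of the objective is an inference from the surrounding code, not a statement from the repository.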


# mafl_dataloader = LazyLoader.w300().loader_train_inf
# mes = UniformMeasure2D01(next(mafl_dataloader)['meta']['keypts_normalized'].type(torch.float32)).cuda()
mes = UniformMeasure2D01(next(iter(
    LazyLoader.celeba_test(batch_size)))[1]).cuda()

for j in range(10000):
    weights = Dirichlet(torch.ones(batch_size) / 10).sample().cuda()
    barycenter, lll = compute_wbc(mes, weights, min(200, j + 10))

    if j % 50 == 0:
        print(j)
        sced.step(lll)

    if j % 50 == 0:
        plt.imshow(barycenter.toImage(200)[0][0].detach().cpu().numpy())
        plt.show()

    starting_model_number = 0
    if j % 1000 == 0 and j > 0:
Code example #17
                                               StyleGANLoss(discriminator_img),
                                               (0.001 / 4, 0.0015 / 4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(
    f"{Paths.default.board()}/brule1_cardio_{int(time.time())}")
WR.writer = writer

batch = next(LazyLoader.cardio().loader_train_inf)

batch_test = next(iter(LazyLoader.cardio().test_loader))
test_img = batch["image"].cuda()

test_landmarks = batch["keypoints"].cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=10000, was_coef=100)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

g_transforms_2: albumentations.DualTransform = albumentations.Compose([
    ToNumpy(),
    NumpyBatch(
        albumentations.Compose([
            albumentations.ElasticTransform(p=0.7,
                                            alpha=150,
Code example #18
        # f'{Paths.default.nn()}/stylegan2_w300_{str(starting_model_number).zfill(6)}.pt',
        map_location="cpu")

    generator.load_state_dict(weights['g'])
    style_encoder.load_state_dict(weights['s'])
    encoder_HG.load_state_dict(weights['e'])

    encoder_ema.storage_model.load_state_dict(weights['e'])

    generator = generator.cuda()
    encoder_HG = encoder_HG.cuda()
    style_encoder = style_encoder.cuda()
    decoder = ConditionalDecoder(generator)

    test_img = next(LazyLoader.w300().loader_train_inf)["data"][:8].cuda()

    with torch.no_grad():
        # pred_measures_test, sparse_hm_test = encoder_HG(test_img)
        encoded_test = encoder_HG(test_img)
        pred_measures_test: UniformMeasure2D01 = UniformMeasure2D01(
            encoded_test["coords"])
        heatmaper_256 = ToGaussHeatMap(256, 1.0)
        sparse_hm_test_1 = heatmaper_256.forward(pred_measures_test.coord)

        latent_test = style_encoder(test_img)

        sparce_mask = sparse_hm_test_1.sum(dim=1, keepdim=True)
        sparce_mask[sparce_mask < 0.0003] = 0
        iwm = imgs_with_mask(test_img, sparce_mask)
        send_images_to_tensorboard(WR.writer, iwm, "REAL", i)