Example 1
writer = SummaryWriter(f"{Paths.default.board()}/cardio{int(time.time())}")
WR.writer = writer

#%%

# fixed test batch and noise, used for periodic visual evaluation
test_batch = next(LazyLoader.cardio().loader_train_inf)
test_img = test_batch["image"].cuda()
test_landmarks = next(LazyLoader.cardio_landmarks(args.data_path).loader_train_inf).cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)
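
# `mixing_noise` is defined outside this excerpt. A minimal sketch of what it
# presumably does, following the common StyleGAN2 (rosinality) convention: with
# probability `prob` return two latents (style mixing), otherwise one. Named
# `mixing_noise_sketch` so it does not shadow the real helper; this is an
# assumption, not the repo's actual implementation.
import random

def mixing_noise_sketch(batch: int, latent_dim: int, prob: float, device):
    if prob > 0 and random.random() < prob:
        # two independent latents -> the generator mixes styles at a random layer
        return list(torch.randn(2, batch, latent_dim, device=device).unbind(0))
    return [torch.randn(batch, latent_dim, device=device)]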

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=100000, was_coef=2000)
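
# `MesBceWasLoss` is not shown in this excerpt. Given the heatmapper argument and
# the two coefficients, a plausible reading is a weighted sum of a pixelwise BCE
# between rendered heatmaps and a transport-style distance between the point sets.
# The class below is a hypothetical sketch under that assumption, not the repo's code.
import torch.nn as nn

class MesBceWasLossSketch(nn.Module):
    def __init__(self, heatmapper, bce_coef: float, was_coef: float):
        super().__init__()
        self.heatmapper, self.bce_coef, self.was_coef = heatmapper, bce_coef, was_coef
        self.bce = nn.BCELoss()

    def forward(self, pred_mes, target_mes):
        hm_pred = self.heatmapper.forward(pred_mes.coord)
        hm_target = self.heatmapper.forward(target_mes.coord)
        # crude stand-in for the Wasserstein term: distance between matched points
        was_term = (pred_mes.coord - target_mes.coord).norm(dim=-1).mean()
        return self.bce_coef * self.bce(hm_pred, hm_target) + self.was_coef * was_term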

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)
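
# `Accumulator` is also external. Its name and arguments suggest an exponential
# moving average of a module's weights, flushed back periodically; a sketch under
# that assumption (hypothetical — the real class may instead write to disk or logs):
class AccumulatorSketch:
    def __init__(self, module, decay: float = 0.99, write_every: int = 100):
        self.module, self.decay, self.write_every = module, decay, write_every
        self.shadow = [p.detach().clone() for p in module.parameters()]
        self.step = 0

    def accumulate(self):
        self.step += 1
        with torch.no_grad():
            for s, p in zip(self.shadow, self.module.parameters()):
                s.mul_(self.decay).add_(p, alpha=1 - self.decay)
            if self.step % self.write_every == 0:
                for s, p in zip(self.shadow, self.module.parameters()):
                    p.copy_(s)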

for i in range(100000):

    WR.counter.update(i)

    # skip the occasional malformed batch instead of aborting the whole run
    try:
        real_img = next(LazyLoader.cardio().loader_train_inf)["image"].cuda()
        landmarks = next(LazyLoader.cardio_landmarks(args.data_path).loader_train_inf).cuda()
        measure = UniformMeasure2D01(torch.clamp(landmarks, max=1))
        heatmap_sum = heatmapper.forward(measure.coord).sum(1, keepdim=True).detach()
    except Exception as e:
        print(e)
        continue
Example 2
discriminator = Discriminator(image_size)

starting_model_number = 290000
weights = torch.load(
    f'{Paths.default.models()}/celeba_gan_256_{str(starting_model_number).zfill(6)}.pt',
    map_location="cpu")

discriminator.load_state_dict(weights['d'])
generator.load_state_dict(weights['g'])

generator = generator.cuda()
discriminator = discriminator.cuda()

gan_model = StyleGanModel(generator, StyleGANLoss(discriminator),
                          (0.001, 0.0015))
gan_accumulator = Accumulator(generator, decay=0.99, write_every=100)

writer = SummaryWriter(f"{Paths.default.board()}/celeba{int(time.time())}")

print("Starting Training Loop...")
starting_model_number = 0
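# Note: `starting_model_number` is reset to 0 after loading checkpoint 290000, so
# this run numbers its new checkpoints from zero again. The load above reads keys
# 'd' and 'g' from a zero-padded filename; a save call mirroring it would
# presumably look like the sketch below (the periodic condition is an assumption):
#
# if i % 10000 == 0 and i > 0:
#     torch.save(
#         {'g': generator.state_dict(), 'd': discriminator.state_dict()},
#         f'{Paths.default.models()}/celeba_gan_256_{str(i + starting_model_number).zfill(6)}.pt',
#     )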

for i in range(300000):

    print(i)

    real_img = next(LazyLoader.celeba().loader).to(device)

    noise: List[Tensor] = mixing_noise(batch_size, noise_size, 0.9, device)
    fake, latent = generator.forward(noise, return_latents=True)
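
    # `StyleGANLoss` is opaque in this excerpt; the standard StyleGAN2
    # non-saturating objective it most likely wraps looks like this sketch
    # (illustration only, not this repo's API):
    fake_pred = discriminator(fake)
    g_loss_sketch = torch.nn.functional.softplus(-fake_pred).mean()
    d_loss_sketch = (torch.nn.functional.softplus(discriminator(fake.detach()))
                     + torch.nn.functional.softplus(-discriminator(real_img))).mean()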
Example 3
WR.writer = writer

N = 3148

heatmapper = ToGaussHeatMap(256, 4)
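
# `ToGaussHeatMap(256, 4)` presumably rasterizes each normalized keypoint as a
# Gaussian with sigma = 4 px on a 256 x 256 grid. A self-contained sketch of that
# operation (an assumption about the class, not its actual code):
def gauss_heatmap_sketch(coords01, size: int = 256, sigma: float = 4.0):
    # coords01: (B, N, 2) keypoints in [0, 1] -> (B, N, size, size) heatmaps
    grid = torch.arange(size, dtype=torch.float32, device=coords01.device)
    ys, xs = torch.meshgrid(grid, grid, indexing="ij")
    cx = coords01[..., 0, None, None] * (size - 1)
    cy = coords01[..., 1, None, None] * (size - 1)
    return torch.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))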

batch = next(LazyLoader.w300().loader_train_inf)
test_img = batch["data"].cuda()
test_landmarks = torch.clamp(batch["meta"]["keypts_normalized"].cuda(), max=1)
test_hm = heatmapper.forward(test_landmarks).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(l2_lambda=2.0).cuda()

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
enc_accumulator = Accumulator(enc_dec.style_encoder,
                              decay=0.99,
                              write_every=100)

for i in range(100000):

    WR.counter.update(i)

    batch = next(LazyLoader.w300().loader_train_inf)

    # take images and landmarks from the same batch so the pair stays aligned
    real_img = batch["data"].cuda()
    landmarks = torch.clamp(batch["meta"]["keypts_normalized"].cuda(), max=1)
    heatmap_sum = heatmapper.forward(landmarks).sum(1, keepdim=True).detach()
Example 4
                   # head tail: 136 sigmoid outputs reshaped to 68 (x, y) points in [0, 1]
                   EqualLinear(256, 136), nn.Sigmoid(), View(68, 2))
hg.load_state_dict(weights['gh'])
hg = hg.cuda()
hm_discriminator = Discriminator(image_size, input_nc=1, channel_multiplier=1)
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

gan_model = StyleGanModel[nn.Module](hg, StyleGANLoss(hm_discriminator),
                                     (0.001, 0.0015))

writer = SummaryWriter(f"{Paths.default.board()}/lmgen{int(time.time())}")
WR.writer = writer

test_noise = torch.randn(batch_size, 100, device=device)

hm_accumulator = Accumulator(hg, decay=0.98, write_every=100)

# os.mkdir(f"{Paths.default.data()}/w300_gen_{N}")
# os.mkdir(f"{Paths.default.data()}/w300_gen_{N}/lmbc")

# sample synthetic landmark sets from noise and dump them to disk
for i in range(7000 // batch_size):
    noise = torch.randn(batch_size, 100, device=device)
    batch_ldmks = hg.forward(noise).detach().cpu().numpy()
    print(i)
    for j in range(batch_size):
        b: np.ndarray = batch_ldmks[j]  # NumPy array after .detach().cpu().numpy()
        # b[43] = b[42]
        print((b < 0.001).nonzero())  # flag near-zero (likely degenerate) coordinates
        np.save(
Example 5
# gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001/4, 0.0015/4))

# style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(f"{Paths.default.board()}/brule1_cardio_{int(time.time())}")
WR.writer = writer

batch = next(iter(LazyLoader.cardio().test_loader))
test_img = batch["image"].cuda()


# psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=10000, was_coef=100)

# image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)


for i in range(100000):

    WR.counter.update(i)

    batch = next(LazyLoader.cardio().loader_train_inf)
    real_img = batch["image"].cuda()
    train_landmarks = batch["keypoints"].cuda()

    # reload the loss coefficients every step so they can be tuned while training runs
    with open(os.path.join(sys.path[0], "../parameters/cycle_loss_2.json")) as f:
        coefs = json.load(f)

    WR.writable("sup", mes_loss.forward)(hg.forward(real_img)["mes"], UniformMeasure2D01(train_landmarks)).__mul__(coefs["sup"]) \
        .minimize_step(cont_opt)
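
    # The fluent chain above relies on helpers outside this excerpt: WR.writable
    # presumably wraps a loss callable so its value is logged under the tag "sup",
    # and the returned object supports scaling and minimize_step. A hypothetical
    # sketch of that wrapper (not the repo's actual class):
    #
    # class LossValueSketch:
    #     def __init__(self, tensor):
    #         self.tensor = tensor
    #     def __mul__(self, coef):
    #         return LossValueSketch(self.tensor * coef)
    #     def minimize_step(self, *optimizers):
    #         for opt in optimizers:
    #             opt.zero_grad()
    #         self.tensor.backward()
    #         for opt in optimizers:
    #             opt.step()
    #         return self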