Code example #1
import numpy as np
import ot

from dataset.lazy_loader import LazyLoader
from parameters.path import Paths

image_size = 256
padding = 200  # number of landmark points per sample
N = 300  # number of samples
prob = np.ones(padding) / padding  # uniform weights over the landmark points

dataset_train = LazyLoader.cardio_landmarks("cardio_300/lm").dataset_train


def load_landmarks(k):
    # Load the k-th landmark tensor as a NumPy array
    return dataset_train[k].numpy()


landmarks = [load_landmarks(k) for k in range(N)]


def compute_w2(i, j):
    # Cost matrix of squared Euclidean distances between the two landmark sets
    M_ij = ot.dist(landmarks[i], landmarks[j])
    # Exact EMD under uniform weights, i.e. the squared Wasserstein-2 distance
    return ot.emd2(prob, prob, M_ij)
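
A hypothetical usage sketch (not part of the original snippet): compute_w2 can be mapped over all index pairs to build the pairwise distance matrix that code example #3 later loads. The output file name mirrors the cardio_graph{N}.npy path used there, and the joblib parallelization reuses the import seen in that example; both are assumptions here.

# Hypothetical sketch: build and save the full pairwise W2 matrix.
from joblib import Parallel, delayed

D = np.array(Parallel(n_jobs=-1)(
    delayed(compute_w2)(i, j) for i in range(N) for j in range(N)
)).reshape(N, N)
np.save(f"{Paths.default.models()}/cardio_graph{N}.npy", D)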

Code example #2
hm_discriminator = hm_discriminator.cuda()

# "tuda"/"obratno" (Russian: "there"/"back"): the forward heatmap-to-image GAN
# and the reverse image-to-skeleton GAN, each with its learning-rate pair.
gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator, StyleGANLoss(discriminator_img), (0.001 / 4, 0.0015 / 4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg, StyleGANLoss(hm_discriminator, r1=3), (2e-5, 0.0015 / 4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

print(f"board path: {Paths.default.board()}/cardio{int(time.time())}")
writer = SummaryWriter(f"{Paths.default.board()}/cardio{int(time.time())}")
WR.writer = writer

#%%

# Fixed test fixtures for periodic monitoring: one image batch, one landmark
# batch, the landmarks rendered as a single-channel heatmap, and fixed style noise.
test_batch = next(LazyLoader.cardio().loader_train_inf)
test_img = test_batch["image"].cuda()
test_landmarks = next(LazyLoader.cardio_landmarks(args.data_path).loader_train_inf).cuda()
test_measure = UniformMeasure2D01(torch.clamp(test_landmarks, max=1))  # clamp coords into [0, 1]
test_hm = heatmapper.forward(test_measure.coord).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=100000, was_coef=2000)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

for i in range(100000):

    WR.counter.update(i)

    try:
        ...  # (training-step body truncated in the original snippet)
    except Exception:
        raise  # no-op handler added so the truncated snippet still parses
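
A hypothetical monitoring sketch (not in the original): inside such a loop, the fixed test fixtures could be written to the SummaryWriter every so often. The call enc_dec.generator(test_hm, test_noise) is an assumed signature; everything else uses standard TensorBoard and torchvision calls.

# Hypothetical sketch; enc_dec.generator(hm, noise) is an ASSUMED signature,
# and clamping images to [0, 1] for display is an assumption as well.
from torchvision.utils import make_grid

if i % 100 == 0:
    with torch.no_grad():
        fake = enc_dec.generator(test_hm, test_noise)
    writer.add_image("test/fake", make_grid(fake.clamp(0, 1)), i)
    writer.add_image("test/real", make_grid(test_img.clamp(0, 1)), i)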
Code example #3
from dataset.lazy_loader import LazyLoader, W300DatasetLoader, CelebaWithKeyPoints, Celeba
from sklearn.neighbors import NearestNeighbors
import numpy as np
import matplotlib.pyplot as plt
from dataset.toheatmap import ToGaussHeatMap
from dataset.probmeasure import UniformMeasure2D01
import pandas as pd
import networkx as nx
import ot
from barycenters.sampler import Uniform2DBarycenterSampler, Uniform2DAverageSampler
from parameters.path import Paths
from joblib import Parallel, delayed

N = 701  # number of samples
dataset = LazyLoader.cardio_landmarks(f"cardio_{N}/lm").dataset_train
D = np.load(f"{Paths.default.models()}/cardio_graph{N}.npy")  # pairwise distance matrix (cf. compute_w2 in code example #1)
padding = 200  # number of landmark points per sample
prob = np.ones(padding) / padding  # uniform weights over the landmark points
NS = 1000


def LS(k):
    # Load the k-th landmark set as a NumPy array
    return dataset[k].numpy()


ls = np.asarray([LS(k) for k in range(N)])  # all landmark sets, shape (N, padding, 2)


def viz_mes(ms):
    heatmapper = ToGaussHeatMap(128, 1)
    ...  # (rest of the function truncated in the original snippet)
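
A hypothetical completion sketch for viz_mes, assuming ToGaussHeatMap.forward takes a [batch, points, 2] coordinate tensor and returns per-point heatmaps (the calling convention seen in code examples #2 and #4); the torch import and the plotting choices are assumptions.

# Hypothetical sketch: render one landmark set as a summed Gaussian heatmap.
import torch

def viz_mes_sketch(ms):
    heatmapper = ToGaussHeatMap(128, 1)
    coords = torch.from_numpy(np.asarray(ms))[None].float()  # [1, padding, 2]
    hm = heatmapper.forward(coords).sum(1)                   # assumed shape: [1, 128, 128]
    plt.imshow(hm[0].numpy(), cmap="hot")
    plt.axis("off")
    plt.show()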
Code example #4
gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator,
                                               StyleGANLoss(discriminator_img),
                                               (0.001 / 4, 0.0015 / 4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg,
                                               StyleGANLoss(hm_discriminator),
                                               (2e-5, 0.0015 / 4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(f"{Paths.default.board()}/cardio{int(time.time())}")
WR.writer = writer

test_batch = next(LazyLoader.cardio().loader_train_inf)
test_img = test_batch["image"].cuda()
test_landmarks = next(LazyLoader.cardio_landmarks().loader_train_inf).cuda()  # NB: other snippets pass a data path to cardio_landmarks
test_hm = heatmapper.forward(test_landmarks).sum(1, keepdim=True).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceWasLoss(heatmapper, bce_coef=1000000, was_coef=4000)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

for i in range(100000):

    WR.counter.update(i)

    real_img = next(LazyLoader.cardio().loader_train_inf)["image"].cuda()
    landmarks = next(LazyLoader.cardio_landmarks().loader_train_inf).cuda()
    # ... (rest of the training loop truncated in the original snippet)