Example #1
 def __init__(self,
              npop,
              target,
              cpkt="stylegan2/stylegan2-ffhq-config-f.pt",
              lamb=5.0,
              sigma=0.4,
              device="cuda",
              trunc=.6,
              mask=None,
              ploss=False):
     """Build a population of StyleGAN2 latents for a search toward `target`.

     npop is the population size, target the reference image, lamb/sigma
     search hyper-parameters, trunc the truncation factor applied to the
     initial latents, and ploss toggles an LPIPS perceptual loss.
     """
     with torch.no_grad():
         # Plain hyper-parameter bookkeeping.
         self.device = device
         self.npop = npop
         self.target = target
         self.mask = mask
         self.lamb = lamb
         self.sigma = sigma
         self.trunc = trunc

         # Pre-trained StyleGAN2 generator, inference mode only.
         self.generator = Generator(IMG_DIM,
                                    LATENT_DIM,
                                    8,
                                    channel_multiplier=2).to(self.device)
         self.generator.eval()
         state = torch.load(cpkt)
         self.generator.load_state_dict(state['g_ema'])

         # Initial genomes: W-space latents pulled toward the mean latent
         # by the truncation factor.
         z = torch.randn(npop, LATENT_DIM, device=device)
         self.trunc_target = self.generator.mean_latent(4096)
         self.genomes = self.trunc_target + self.trunc * (
             self.generator.get_latent(z) - self.trunc_target)

         # Per-individual bookkeeping, kept on CPU.
         self.fitness = []
         self.ranks = torch.zeros([npop], device="cpu")
         self.faces = torch.zeros([npop, 3, IMG_DIM, IMG_DIM], device="cpu")

         # Optional perceptual (LPIPS) distance model.
         self.percept = lpips.PerceptualLoss(
             model='net-lin',
             net='vgg',
             use_gpu=device.startswith('cuda')) if ploss else None

         # Render the initial generation.
         self.generate()
Example #2
def main(args):
    """Optimize a StyleGAN2 latent so the generated image matches a CLIP
    text prompt (StyleCLIP-style latent optimization).

    Returns the final generated image tensor; in "edit" mode the image
    rendered from the starting latent is concatenated in front of it for a
    side-by-side comparison.
    """
    ensure_checkpoint_exists(args.ckpt)
    text_inputs = torch.cat([clip.tokenize(args.description)]).cuda()
    os.makedirs(args.results_dir, exist_ok=True)

    # Pre-trained generator ("g_ema" weights), inference only.
    g_ema = Generator(args.size, 512, 8)
    g_ema.load_state_dict(torch.load(args.ckpt)["g_ema"], strict=False)
    g_ema.eval()
    g_ema = g_ema.cuda()
    mean_latent = g_ema.mean_latent(4096)

    # Starting latent: user-supplied file, a fresh truncated sample (edit
    # mode), or the mean latent broadcast over 18 style layers.
    if args.latent_path:
        latent_code_init = torch.load(args.latent_path).cuda()
    elif args.mode == "edit":
        latent_code_init_not_trunc = torch.randn(1, 512).cuda()
        with torch.no_grad():
            _, latent_code_init = g_ema([latent_code_init_not_trunc],
                                        return_latents=True,
                                        truncation=args.truncation,
                                        truncation_latent=mean_latent)
    else:
        latent_code_init = mean_latent.detach().clone().repeat(1, 18, 1)

    # Optimize a detached copy so latent_code_init stays fixed as the anchor
    # for the L2 penalty below.
    latent = latent_code_init.detach().clone()
    latent.requires_grad = True

    clip_loss = CLIPLoss()

    optimizer = optim.Adam([latent], lr=args.lr)

    pbar = tqdm(range(args.step))

    for i in pbar:
        # Learning-rate schedule driven by progress t in [0, 1); the ramp
        # itself is defined by get_lr elsewhere in the file.
        t = i / args.step
        lr = get_lr(t, args.lr)
        optimizer.param_groups[0]["lr"] = lr

        img_gen, _ = g_ema([latent],
                           input_is_latent=True,
                           randomize_noise=False)

        c_loss = clip_loss(img_gen, text_inputs)

        if args.mode == "edit":
            # L2 penalty keeps the edited latent close to its start point.
            l2_loss = ((latent_code_init - latent)**2).sum()
            loss = c_loss + args.l2_lambda * l2_loss
        else:
            loss = c_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pbar.set_description((f"loss: {loss.item():.4f};"))
        if args.save_intermediate_image_every > 0 and i % args.save_intermediate_image_every == 0:
            with torch.no_grad():
                img_gen, _ = g_ema([latent],
                                   input_is_latent=True,
                                   randomize_noise=False)

            # Bug fix: write intermediates into the directory created above
            # instead of a hard-coded "results/" path, which crashed when
            # that directory did not exist.
            torchvision.utils.save_image(
                img_gen,
                os.path.join(args.results_dir, f"{str(i).zfill(5)}.png"),
                normalize=True,
                range=(-1, 1))

    if args.mode == "edit":
        with torch.no_grad():
            img_orig, _ = g_ema([latent_code_init],
                                input_is_latent=True,
                                randomize_noise=False)

        final_result = torch.cat([img_orig, img_gen])
    else:
        final_result = img_gen

    return final_result
if __name__ == '__main__':
    # Pick the first GPU if available and make it the default CUDA device.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    torch.cuda.set_device(device)
    # Keypoint heatmap encoder: 5 classes, 64x64 heatmaps. Presumably a
    # stacked-hourglass network — confirm against HG_softmax2020's definition.
    encoder_HG = HG_softmax2020(num_classes=5, heatmap_size=64)
    # encoder_HG.load_state_dict(torch.load("/home/ibespalov/pomoika/hg2_e29.pt", map_location="cpu"))

    print("HG")

    # StyleGAN2 hyper-parameters.
    latent = 512
    n_mlp = 5
    size = 256

    # Generator conditioned on 5-channel keypoint heatmaps.
    generator = CondGen3(Generator(
        size, latent, n_mlp, channel_multiplier=1,
    ), heatmap_channels=5)

    discriminator = CondDisc3(
        size, heatmap_channels=5, channel_multiplier=1
    )

    style_encoder = StyleEncoder(style_dim=latent)

    # Iteration number of the training snapshot to resume from.
    starting_model_number = 190000  # 170000
    weights = torch.load(
        f'{Paths.default.models()}/stylegan2_MAFL_{str(starting_model_number).zfill(6)}.pt',
        # f'{Paths.default.nn()}/zhores/stylegan2_w300_{str(starting_model_number).zfill(6)}.pt',
        map_location="cpu"
    )
Example #4
def generate(args, g_ema, device, mean_latent):
    """Sample args.pics images from g_ema and save them as
    sample/000000.png, 000001.png, ...

    Each iteration draws args.sample fresh z-space codes and applies
    truncation toward mean_latent with factor args.truncation.
    """
    import os  # function-local: only needed to create the output directory

    # Robustness fix: save_image fails if the target directory is missing.
    os.makedirs("sample", exist_ok=True)

    with torch.no_grad():
        g_ema.eval()
        for i in tqdm(range(args.pics)):
            sample_z = torch.randn(args.sample, args.latent, device=device)

            sample, _ = g_ema(
                [sample_z], truncation=args.truncation, truncation_latent=mean_latent
            )

            utils.save_image(
                sample,
                f"sample/{str(i).zfill(6)}.png",
                nrow=1,
                normalize=True,
                range=(-1, 1),
            )

# Pick a GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# 1024px StyleGAN2 generator with the standard config-f hyper-parameters.
stylegan_gen = Generator(size=1024, style_dim=512, n_mlp=8, channel_multiplier=2).to(device)

# NOTE(review): relative path — assumes ./twdne3.pt exists in the working dir.
checkpoint = torch.load('./twdne3.pt')

# Load the "g_ema" entry — presumably the EMA generator weights; confirm
# against the checkpoint's producer.
stylegan_gen.load_state_dict(checkpoint["g_ema"])


# No truncation latent is computed here; generate() would receive None.
mean_latent = None

#generate(args, stylegan_gen, device, mean_latent)
Example #5
    torch.cuda.set_device(device)

    # MUNIT content/style encoder; both checkpoint paths disabled here, so
    # it starts untrained.
    cont_style_encoder: MunitEncoder = cont_style_munit_enc(
        munit_args,
        None,  # "/home/ibespalov/pomoika/munit_content_encoder15.pt",
        None  # "/home/ibespalov/pomoika/munit_style_encoder_1.pt"
    )  #.to(device)

    # StyleGAN2 hyper-parameters.
    args.latent = 512
    args.n_mlp = 5

    args.start_iter = 0

    generator = CondGen2(
        Generator(args.size,
                  args.latent,
                  args.n_mlp,
                  channel_multiplier=args.channel_multiplier))  #.to(device)

    discriminator = CondStyleDisc2Wrapper(
        Discriminator(
            args.size,
            channel_multiplier=args.channel_multiplier))  #.to(device)

    # Ratios used to rescale optimizer settings when regularizers run only
    # every g/d_reg_every steps — the standard StyleGAN2 lazy-regularization
    # recipe; confirm against the optimizer construction below.
    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)

    image_size = args.size
    transform = albumentations.Compose([
        albumentations.HorizontalFlip(),
        albumentations.Resize(image_size, image_size),
        albumentations.ElasticTransform(p=0.5,
Example #6
    print(device)
    torch.cuda.set_device(device)

    # Keypoint heatmap encoder (68 landmark classes, 64x64 heatmaps),
    # warm-started from a saved checkpoint.
    encoder_HG = HG_softmax2020(num_classes=68, heatmap_size=64)
    encoder_HG.load_state_dict(
        torch.load("/trinity/home/n.buzun/PycharmProjects/saved/hg2_e29.pt",
                   map_location="cpu"))
    encoder_HG = encoder_HG.cuda()

    print("HG")

    # StyleGAN2 hyper-parameters.
    latent = 512
    n_mlp = 5
    size = 256

    generator = CondGen3(Generator(size, latent, n_mlp, channel_multiplier=1))

    discriminator = CondDisc3(size, channel_multiplier=1)

    # Iteration number of the snapshot to resume from.
    starting_model_number = 110000

    # NOTE(review): generator is pinned with .cuda() while discriminator
    # uses .to(device) — equivalent only when device is the current CUDA
    # device (set above).
    generator = generator.cuda()
    discriminator = discriminator.to(device)

    # Replicate all three networks across four GPUs.
    generator = nn.DataParallel(generator, [0, 1, 2, 3])
    discriminator = nn.DataParallel(discriminator, [0, 1, 2, 3])
    encoder_HG = nn.DataParallel(encoder_HG, [0, 1, 2, 3])

    style_encoder = StyleEncoder(style_dim=latent).cuda()
    weights = torch.load(
Example #7
    # Restore the pre-trained VAE (architecture params come from the
    # checkpoint's own saved args) and keep only its encoder.
    vae_ckpt = torch.load(args.vae)
    vae_args = vae_ckpt['args']
    vae = VAE(3, n_latent=vae_args.n_latent, size=vae_args.input_size)
    vae.load_state_dict(vae_ckpt['vae'])
    vae = vae.enc.to(device)

    # StyleGAN2 hyper-parameters.
    args.latent = 512
    args.n_mlp = 8

    args.start_iter = 0

    # Generator whose style input is widened to take the VAE latent
    # concatenated with the usual 512-d style code.
    generator = Generator(
        args.size,
        args.latent,
        args.n_mlp,
        channel_multiplier=args.channel_multiplier,
        style_in_dim=args.latent + vae_args.n_latent,
    ).to(device)
    discriminator = Discriminator(
        args.size, channel_multiplier=args.channel_multiplier).to(device)
    # Second generator instance held as the EMA copy for evaluation.
    g_ema = Generator(
        args.size,
        args.latent,
        args.n_mlp,
        channel_multiplier=args.channel_multiplier,
        style_in_dim=args.latent + vae_args.n_latent,
    ).to(device)
    g_ema.eval()
    # accumulate(..., 0) presumably hard-copies generator weights into g_ema
    # as initialization — confirm against accumulate()'s definition.
    accumulate(g_ema, generator, 0)
Example #8
    # CLI: latent space to perturb (z or w), sampling sizes, perturbation
    # epsilon, optional center crop, and the checkpoint to evaluate.
    parser.add_argument('--space', choices=['z', 'w'])
    parser.add_argument('--batch', type=int, default=64)
    parser.add_argument('--n_sample', type=int, default=5000)
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--eps', type=float, default=1e-4)
    parser.add_argument('--crop', action='store_true')
    parser.add_argument('ckpt', metavar='CHECKPOINT')

    args = parser.parse_args()

    latent_dim = 512

    ckpt = torch.load(args.ckpt)

    # EMA generator in eval mode for deterministic sampling.
    g = Generator(args.size, latent_dim, 8).to(device)
    g.load_state_dict(ckpt['g_ema'])
    g.eval()

    # LPIPS perceptual distance with a VGG backbone.
    percept = lpips.PerceptualLoss(model='net-lin',
                                   net='vgg',
                                   use_gpu=device.startswith('cuda'))

    distances = []

    # Split n_sample into full batches plus one remainder batch.
    # NOTE(review): when batch divides n_sample exactly, a trailing
    # zero-sized batch is appended — verify downstream tolerates it.
    n_batch = args.n_sample // args.batch
    resid = args.n_sample - (n_batch * args.batch)
    batch_sizes = [args.batch] * n_batch + [resid]

    with torch.no_grad():
        for batch in tqdm(batch_sizes):
Example #9
import torch
from stylegan2.model import Generator
from stylegan2 import ppl
import matplotlib.pyplot as plt
import numpy as np

# Scratch script: try out style mixing and latent interpolation.

# Load the pre-trained FFHQ config-f generator (1024 px) onto the GPU.
device = "cuda"
g_ema = Generator(1024, 512, 8, channel_multiplier=2).to(device)
checkpoint = torch.load("stylegan2/stylegan2-ffhq-config-f.pt")
g_ema.load_state_dict(checkpoint['g_ema'])

with torch.no_grad():
    # Test new mutation heuristic
    latent1 = torch.randn(1, 512, device=device)
    latent2 = torch.randn(1, 512, device=device)
    trunc_target = g_ema.mean_latent(4096)
    w1 = g_ema.get_latent(latent1)
    w1 = trunc_target + .7 * (w1 - trunc_target)
    w2 = g_ema.get_latent(latent2)
    trunc_target = g_ema.mean_latent(4096)

    fig, axs = plt.subplots(3, 3)
    for i in range(8):
        torch.cuda.empty_cache()
        im = w1.clone()
        p1 = 64 * i
        p2 = 64 * (i + 1)
        diff = (w2[0, :] - trunc_target)