Example #1
0
def draw_interpolation_images(png, cols, rows):
    """Save a grid that interpolates between two random (noise, tag) pairs.

    Each of the `cols` columns interpolates independently from (z_1, tag_1)
    toward (z_2, tag_2) over `rows` steps; row i of the grid holds the i-th
    interpolation step of every column.

    Args:
        png: output image file path.
        cols: number of independent interpolation chains (grid columns).
        rows: number of interpolation steps (grid rows).
    """
    # Interpolation endpoints: random noise plus random one-hot tags.
    z_1 = torch.randn(cols, opt.nz, device=device)
    _, tag_1 = utils.fake_generator(cols, opt.nz, device)
    z_2 = torch.randn(cols, opt.nz, device=device)
    _, tag_2 = utils.fake_generator(cols, opt.nz, device)
    # Per-step increments. NOTE(review): dividing by `rows` means the last
    # row stops one step short of (z_2, tag_2); use (rows - 1) if the exact
    # endpoint should be included — kept as-is to preserve existing output.
    dz = (z_2 - z_1) / rows
    dtag = (tag_2 - tag_1) / rows
    # Pre-size the batch buffers directly on the target device.
    noises = torch.empty(cols * rows, opt.nz, device=device)
    tags = torch.empty(cols * rows, len(utils.tag), device=device)
    for i in range(rows):
        noises[cols * i:cols * (i + 1), :] = z_1 + i * dz
        tags[cols * i:cols * (i + 1), :] = tag_1 + i * dtag
    # Detach so saving the images does not keep the generator's autograd
    # graph alive (consistent with the other draw_* helpers in this file).
    images = netG(noises, tags).detach()
    vutils.save_image(utils.denorm(images), png, nrow=cols, padding=0)
Example #2
0
def draw_fix_tag_images(png, cols, rows):
    """Save a grid of samples that all share one fixed tag.

    Every cell uses a different random noise vector but the same
    'green hair' / 'red eyes' tag, so the grid shows noise-driven variety
    under a constant condition.
    """
    count = cols * rows
    noises, _ = utils.fake_generator(count, opt.nz, device=device)
    one_hot = utils.get_one_hot_tag(['green hair', 'red eyes'])
    fixed_tag = torch.FloatTensor(one_hot).view(1, -1).to(device)
    # Replicate the single tag row for every generated image.
    tags = fixed_tag.repeat(count, 1)
    images = netG(noises, tags).detach()
    vutils.save_image(utils.denorm(images), png, nrow=cols, padding=0)
Example #3
0
def generate_image(png, cols, rows, utag):
    """Generate cols*rows images, saving each individually plus a grid.

    Individual images are written to ./generate/<i>.png; the combined grid
    goes to `png`.

    Args:
        png: path of the combined grid image.
        cols: grid columns (also `nrow` of the saved grid).
        rows: grid rows.
        utag: list of tag strings; when non-empty, every image is
            conditioned on this fixed tag, otherwise the random tags from
            utils.fake_generator are used.
    """
    noises, tags = utils.fake_generator(cols * rows, NZ, device=device)
    if len(utag) != 0:
        # Override the random tags with the user-supplied fixed tag.
        tag = utils.get_one_hot_tag(utag)
        tag = torch.FloatTensor(tag).view(1, -1).to(device)
        tags = torch.cat([tag for _ in range(cols * rows)], dim=0)
    images = NETG(noises, tags).detach()
    path = "./generate"
    # exist_ok=True replaces the old try/except OSError: pass, which also
    # swallowed unrelated failures such as permission errors.
    os.makedirs(path, exist_ok=True)
    for i, image in enumerate(images):
        vutils.save_image(utils.denorm(image),
                          os.path.join(path,
                                       str(i) + ".png"))
    vutils.save_image(utils.denorm(images), png, nrow=cols, padding=0)
Example #4
0
def draw_fix_noise_images(png, cols, rows):
    """Save a grid where every cell shares one noise vector.

    The tags vary randomly per cell, so the grid shows tag-driven variety
    under a constant latent code.
    """
    count = cols * rows
    base_noise = torch.randn(1, opt.nz, device=device)
    # One noise vector duplicated across the whole batch.
    noises = base_noise.repeat(count, 1)
    _, tags = utils.fake_generator(count, opt.nz, device=device)
    images = netG(noises, tags).detach()
    vutils.save_image(utils.denorm(images), png, nrow=cols, padding=0)
Example #5
0
def draw_generated_images(png, cols, rows):
    """Save a grid of fully random samples (random noise and random tags)."""
    batch = cols * rows
    noises, tags = utils.fake_generator(batch, opt.nz, device=device)
    samples = netG(noises, tags).detach()
    vutils.save_image(utils.denorm(samples), png, nrow=cols, padding=0)
Example #6
0
def main(writer):
    """Train the conditional GAN and log progress to `writer` (TensorBoard).

    The discriminator outputs both a real/fake score and a tag prediction
    (ACGAN-style); a gradient penalty on perturbed real images is added to
    the discriminator loss. Relies on module-level configuration
    (batch_size, noise_size, learning rates, lambda_cls, lambda_gp,
    interval, model/image paths, fix_noise/fix_tag, device).
    """
    dataset = AnimeDataset(avatar_tag_dat_path,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               # Map pixels into [-1, 1]; utils.denorm is
                               # used below to invert this when saving.
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5))
                           ]))
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=num_workers,
                                              drop_last=True)
    # G is conditioned on concatenated hair+eye one-hot tags; D predicts
    # real/fake plus the tag classes.
    G = Generator(noise_size, len(utils.hair) + len(utils.eyes)).to(device)
    D = Discriminator(len(utils.hair), len(utils.eyes)).to(device)
    G_optim = torch.optim.Adam(G.parameters(),
                               lr=learning_rate_g,
                               betas=(beta_1, 0.999))
    D_optim = torch.optim.Adam(D.parameters(),
                               lr=learning_rate_d,
                               betas=(beta_1, 0.999))
    criterion = nn.BCELoss()

    # training
    iteration = 0
    real_label = torch.ones(batch_size).to(device)
    # real_label = torch.Tensor(batch_size).uniform_(0.9, 1).to(device)  # soft labeling
    fake_label = torch.zeros(batch_size).to(device)
    for epoch in range(max_epoch + 1):
        for i, (real_tag, real_img) in enumerate(data_loader):
            real_img = real_img.to(device)
            real_tag = real_tag.to(device)

            # train D with real images
            D.zero_grad()
            real_score, real_predict = D(real_img)
            real_discrim_loss = criterion(real_score, real_label)
            real_classifier_loss = criterion(real_predict, real_tag)

            # train D with fake images
            z, fake_tag = utils.fake_generator(batch_size, noise_size, device)
            fake_img = G(z, fake_tag).to(device)
            fake_score, fake_predict = D(fake_img)
            fake_discrim_loss = criterion(fake_score, fake_label)

            discrim_loss = (real_discrim_loss + fake_discrim_loss) * 0.5
            # Only the real-image classification term enters D's loss here.
            classifier_loss = real_classifier_loss * lambda_cls

            # gradient penalty
            # Penalize D's score gradient at points between real images and
            # noise-perturbed real images (perturbation scaled by the batch
            # std). NOTE(review): this resembles the DRAGAN penalty scheme —
            # confirm against the intended reference.
            alpha_size = [1] * real_img.dim()
            alpha_size[0] = real_img.size(0)
            alpha = torch.rand(alpha_size).to(device)
            # Variable(...) is the legacy (pre-0.4) autograd wrapper; kept
            # as-is for behavior parity.
            x_hat = Variable(alpha * real_img.data + (1 - alpha) *
                             (real_img.data + 0.5 * real_img.data.std() *
                              torch.rand(real_img.size()).to(device)),
                             requires_grad=True).to(device)
            # NOTE: this rebinds fake_score and clobbers fake_tag with D's
            # tag prediction; only the score is used for the penalty below.
            fake_score, fake_tag = D(x_hat)
            gradients = grad(outputs=fake_score,
                             inputs=x_hat,
                             grad_outputs=torch.ones(
                                 fake_score.size()).to(device),
                             create_graph=True,
                             retain_graph=True,
                             only_inputs=True)[0].view(x_hat.size(0), -1)
            # Standard two-sided penalty: push gradient norms toward 1.
            gradient_penalty = lambda_gp * (
                (gradients.norm(2, dim=1) - 1)**2).mean()

            D_loss = discrim_loss + classifier_loss + gradient_penalty
            D_loss.backward()
            D_optim.step()

            # train G
            # Fresh (z, tag) sample; G is rewarded when D scores its output
            # as real AND D's tag prediction matches the conditioning tag.
            G.zero_grad()
            z, fake_tag = utils.fake_generator(batch_size, noise_size, device)
            fake_img = G(z, fake_tag).to(device)
            fake_score, fake_predict = D(fake_img)

            discrim_loss = criterion(fake_score, real_label)
            classifier_loss = criterion(fake_predict, fake_tag) * lambda_cls

            G_loss = discrim_loss + classifier_loss
            G_loss.backward()
            G_optim.step()

            # plot loss curve
            writer.add_scalar('Loss_D', D_loss.item(), iteration)
            writer.add_scalar('Loss_G', G_loss.item(), iteration)
            print('[{}/{}][{}/{}] Iteration: {}'.format(
                epoch, max_epoch, i, len(data_loader), iteration))

            # Periodic snapshot: generate from the fixed (noise, tag) batch
            # so images are comparable across training.
            if iteration % interval == interval - 1:
                fake_img = G(fix_noise, fix_tag)
                vutils.save_image(utils.denorm(fake_img[:64, :, :, :]),
                                  os.path.join(
                                      image_path,
                                      'fake_image_{}.png'.format(iteration)),
                                  padding=0)
                vutils.save_image(utils.denorm(real_img[:64, :, :, :]),
                                  os.path.join(
                                      image_path,
                                      'real_image_{}.png'.format(iteration)),
                                  padding=0)
                grid = vutils.make_grid(utils.denorm(fake_img[:64, :, :, :]),
                                        padding=0)
                writer.add_image('generation results', grid, iteration)

            iteration += 1
        # checkpoint
        torch.save(G.state_dict(),
                   os.path.join(model_path, 'netG_epoch_{}.pth'.format(epoch)))
        torch.save(D.state_dict(),
                   os.path.join(model_path, 'netD_epoch_{}.pth'.format(epoch)))
Example #7
0
# Hyper-parameters and paths, read once from the parsed command-line options.
avatar_tag_dat_path = opt.avatar_tag_dat_path  # dataset .dat file path
learning_rate_g = opt.learning_rate_g  # generator Adam learning rate
learning_rate_d = opt.learning_rate_d  # discriminator Adam learning rate
beta_1 = opt.beta_1  # Adam beta1 shared by both optimizers
batch_size = opt.batch_size
max_epoch = opt.max_epoch
num_workers = opt.num_workers  # DataLoader worker processes
noise_size = opt.noise_size  # dimensionality of the generator's z input
lambda_cls = opt.lambda_cls  # weight of the tag-classification loss
lambda_gp = opt.lambda_gp  # weight of the gradient penalty
interval = opt.interval  # iterations between image snapshots
output_path = opt.output_path
# The trailing "" keeps a trailing path separator on the directory paths.
model_path = os.path.join(output_path, "model", "")
image_path = os.path.join(output_path, "image", "")
log_dir = opt.log_dir
# Fixed (noise, tag) batch reused for every snapshot so that saved images
# are comparable across training iterations.
fix_noise, fix_tag = utils.fake_generator(batch_size, noise_size, device)


def main(writer):
    dataset = AnimeDataset(avatar_tag_dat_path,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5))
                           ]))
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=num_workers,
                                              drop_last=True)
    G = Generator(noise_size, len(utils.hair) + len(utils.eyes)).to(device)