Example #1

# Assumes module-level context: G, D, D_optimizer, d_loss_fn, gan, args,
# device, sigmoid_func, and elapsed_time_dict, plus the time and torch imports.
def train_D(x_real, labels):
    # `labels` is accepted for interface compatibility but unused here.
    start_time = time.time()
    # Put both networks in training mode.
    G.train()
    D.train()

    # Sample latent vectors and generate fakes; detach() blocks gradients
    # from flowing back into G during the discriminator update.
    z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
    x_fake = G(z).detach()

    x_real_d_logit = D(x_real)
    x_fake_d_logit = D(x_fake)
    if args.top_k:
        # Top-k training: rank the real samples by the discriminator's
        # sigmoid confidence and keep the k best (args.largest=False keeps
        # the k worst instead). Note that with k == args.batch_size this
        # only reorders the logits; top-k training proper uses k < batch_size.
        sigmoid_x_real_d_logits = sigmoid_func(x_real_d_logit)
        _, idx = torch.topk(sigmoid_x_real_d_logits.squeeze(),
                            k=args.batch_size, dim=0, largest=args.largest)
        x_real_d_logit = x_real_d_logit[idx]

    x_real_d_loss, x_fake_d_loss = d_loss_fn(x_real_d_logit, x_fake_d_logit)
    gp = gan.gradient_penalty(D,
                              x_real,
                              x_fake,
                              gp_mode=args.gradient_penalty_mode,
                              sample_mode=args.gradient_penalty_sample_mode)

    D_loss = (x_real_d_loss + x_fake_d_loss) + \
        gp * args.gradient_penalty_weight

    D.zero_grad()
    D_loss.backward()
    D_optimizer.step()

    # Record the wall-clock time of this discriminator update.
    elapsed_time_dict['gen_disc_compl'].append(time.time() - start_time)

    return {'d_loss': (x_real_d_loss + x_fake_d_loss).detach().cpu().numpy(),
            'gp': gp.detach().cpu().numpy()}
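All three examples call `d_loss_fn(real_logit, fake_logit)` without defining it. A minimal sketch of one plausible implementation, assuming the common non-saturating GAN loss on raw logits; the repo behind these snippets may use hinge or Wasserstein variants instead:

import torch
import torch.nn.functional as F

def d_loss_fn(r_logit, f_logit):
    # Push real logits toward 1 and fake logits toward 0; the two terms are
    # returned separately so callers can weight or log them individually.
    r_loss = F.binary_cross_entropy_with_logits(r_logit, torch.ones_like(r_logit))
    f_loss = F.binary_cross_entropy_with_logits(f_logit, torch.zeros_like(f_logit))
    return r_loss, f_loss

Returning the two halves separately is what lets the snippets above sum them into the single 'd_loss' entry of the returned dict while keeping them available for per-term logging.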
Example #2

def train_D(x_real):
    G.train()
    D.train()

    z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
    x_fake = G(z).detach()

    x_real_d_logit = D(x_real)
    x_fake_d_logit = D(x_fake)

    x_real_d_loss, x_fake_d_loss = d_loss_fn(x_real_d_logit, x_fake_d_logit)
    gp = gan.gradient_penalty(functools.partial(D),
                              x_real,
                              x_fake,
                              gp_mode=args.gradient_penalty_mode,
                              sample_mode=args.gradient_penalty_sample_mode)

    D_loss = ((x_real_d_loss + x_fake_d_loss)
              + gp * args.gradient_penalty_weight)

    D.zero_grad()
    D_loss.backward()
    D_optimizer.step()

    return {'d_loss': x_real_d_loss + x_fake_d_loss, 'gp': gp}
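`gan.gradient_penalty` is likewise defined outside these snippets. A sketch of the standard WGAN-GP penalty (Gulrajani et al., 2017) under the simplest settings, without the `gp_mode`/`sample_mode` switches the real helper exposes; the signature here is an assumption:

import torch

def gradient_penalty(f, real, fake):
    # Interpolate between real and fake samples, then penalize deviations
    # of the critic's gradient norm from 1 at those points.
    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    x = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    pred = f(x)
    grad = torch.autograd.grad(pred, x,
                               grad_outputs=torch.ones_like(pred),
                               create_graph=True)[0]
    norm = grad.reshape(grad.size(0), -1).norm(2, dim=1)
    return ((norm - 1) ** 2).mean()

`create_graph=True` is what lets the penalty itself be differentiated when `D_loss.backward()` runs.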
Example #3
def train_D(x_real):
    G.train()
    D.train()

    z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
    x_fake = G(z).detach()

    # 1D power-spectrum profiles of the fake and real batches. Per image:
    # grayscale -> 2D FFT -> centered log-magnitude spectrum -> azimuthal
    # average -> min-max normalization to [0, 1]. N and epsilon are
    # module-level constants (profile length and a small offset that keeps
    # the log finite).
    def batch_psd1d(batch):
        profiles = np.zeros([batch.shape[0], N])
        imgs = batch.permute(0, 2, 3, 1).cpu().detach().numpy()
        for t in range(batch.shape[0]):
            img_gray = RGB2gray(imgs[t])
            fshift = np.fft.fftshift(np.fft.fft2(img_gray)) + epsilon
            magnitude_spectrum = 20 * np.log(np.abs(fshift))
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
            profiles[t, :] = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
        return torch.from_numpy(profiles).float().to(device)

    psd1D_img = batch_psd1d(x_fake)                   # fake spectra
    psd1D_rec = batch_psd1d(x_real).requires_grad_()  # real spectra

    # Frequency-domain consistency loss between real and fake spectra.
    # NOTE: both profiles pass through NumPy, so this term carries no
    # gradient into D's parameters; it changes the reported loss value
    # but not the direction of D's update.
    loss_freq = criterion_freq(psd1D_rec, psd1D_img.detach())

    x_real_d_logit = D(x_real)
    x_fake_d_logit = D(x_fake)

    x_real_d_loss, x_fake_d_loss = d_loss_fn(x_real_d_logit, x_fake_d_logit)
    gp = gan.gradient_penalty(functools.partial(D),
                              x_real,
                              x_fake,
                              mode=args.gradient_penalty_mode)

    D_loss = ((x_real_d_loss + x_fake_d_loss)
              + gp * args.gradient_penalty_weight + 2 * loss_freq)

    D.zero_grad()
    D_loss.backward()
    D_optimizer.step()

    return {'d_loss': x_real_d_loss + x_fake_d_loss, 'gp': gp}
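Example #3 also leans on two helpers outside the snippet, `RGB2gray` and `radialProfile.azimuthalAverage` (`criterion_freq` is some tensor-to-tensor loss, e.g. MSE or BCE, not shown here). Sketches of typical implementations, following the widely circulated `radialProfile.py` recipe; the actual module may differ:

import numpy as np

def RGB2gray(rgb):
    # Luminance-weighted channel mix: (H, W, 3) array -> (H, W) grayscale.
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

def azimuthalAverage(image, center=None):
    # Average a 2D spectrum over rings of equal integer radius around
    # `center` (defaults to the image center), yielding a 1D profile.
    y, x = np.indices(image.shape)
    if center is None:
        center = np.array([(x.max() - x.min()) / 2.0,
                           (y.max() - y.min()) / 2.0])
    r = np.hypot(x - center[0], y - center[1])

    # Sort pixels by radius, then average within each integer-radius bin.
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]
    r_int = r_sorted.astype(int)
    deltar = r_int[1:] - r_int[:-1]          # nonzero where the bin changes
    rind = np.where(deltar)[0]               # last index of each radius bin
    nr = rind[1:] - rind[:-1]                # pixel count per bin
    csim = np.cumsum(i_sorted, dtype=float)  # cumulative sum -> fast bin sums
    tbin = csim[rind[1:]] - csim[rind[:-1]]
    return tbin / nr

The profile length depends on the image size (roughly the center-to-corner distance in pixels), which is what the module-level constant `N` must match.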