Example #1
0
class discriminator(nn.Module):
    """GAN discriminator mapping an image batch to a real/fake probability.

    Network architecture is exactly the same as in infoGAN
    (https://arxiv.org/abs/1606.03657):
    Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S

    Args:
        input_dim:  number of input image channels.
        output_dim: size of the final output (1 for a single real/fake score).
        input_size: spatial side length of the (square) input image; the two
                    stride-2 convolutions reduce it by a factor of 4 before
                    the fully connected head.
    """

    def __init__(self, input_dim=1, output_dim=1, input_size=32):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input_size = input_size

        # Two stride-2 convolutions: (input_size) -> (input_size // 4) spatially.
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim, 64, 4, 2, 1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
        )
        # Fully connected head ending in a sigmoid probability.
        self.fc = nn.Sequential(
            nn.Linear(128 * (self.input_size // 4) * (self.input_size // 4),
                      1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            nn.Linear(1024, self.output_dim),
            nn.Sigmoid(),
        )
        initialize_weights(self)

    def forward(self, input):
        """Return per-sample scores of shape (batch, output_dim)."""
        x = self.conv(input)
        # Flatten per sample. Keeping the batch dimension explicit (size(0))
        # is safer than view(-1, C*H*W): a shape mismatch raises an error
        # instead of silently merging into the batch dimension.
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x

# In[43]:

# networks

    # Vanilla GAN training loop (MNIST-style 28x28 images flattened to vectors).
    # NOTE(review): this fragment sits inside an enclosing scope whose header is
    # not visible here; `lr`, `epochs`, `train_loader`, `dataset_dir`,
    # `generator`, `save_images` and `loss_plots` are defined elsewhere.
    G = generator(input_size=100, n_class=28 * 28)
    # NOTE(review): the `discriminator` class defined above takes
    # (input_dim, output_dim, input_size) and no `n_class` keyword — this call
    # appears to target a different (fully connected) discriminator definition;
    # confirm which class is in scope here.
    D = discriminator(input_size=28 * 28, n_class=1)
    # Adam optimizer
    G_optimizer = optim.Adam(G.parameters(), lr=lr)
    D_optimizer = optim.Adam(D.parameters(), lr=lr)

    # Per-epoch mean losses, kept for plotting/saving after training.
    train_hist = {}
    train_hist['D_losses'] = []
    train_hist['G_losses'] = []
    for epoch in tqdm(range(epochs)):
        D_losses = []
        G_losses = []
        for load_data in train_loader:

            # training discriminator ############
            # manually setting gradients to zero before mini batches
            D.zero_grad()

            # format: flatten each image to a 784-dim vector
            load_data = load_data.view(-1, 28 * 28)
            # print load_data.size()[0]
            mini_batch = load_data.size()[0]

            # Label targets: 1 for real samples, 0 for generated samples.
            D_real = torch.ones(mini_batch)
            D_fake = torch.zeros(mini_batch)

            # variables in pytorch can directly be accessed
            # NOTE(review): Variable is the pre-0.4 PyTorch autograd wrapper;
            # on modern PyTorch it is a no-op and can be removed.
            load_data = Variable(load_data)
            D_real = Variable(D_real)
            D_fake = Variable(D_fake)

            # first it takes real data
            D_result = D(load_data)
            # loss calculations due to real data : first term in eqn
            # comparing with ones labels
            D_real_loss = F.binary_cross_entropy(D_result, D_real)
            # D_real_scores = D_result

            ## for loss due to generated samples
            noise = torch.randn((mini_batch, 100))
            noise = Variable(noise)

            G_sample = G(noise)
            D_result = D(G_sample)
            # loss calculations due to generated data : second term in eqn
            # comparing with zero labels
            D_fake_loss = F.binary_cross_entropy(D_result, D_fake)
            # D_fake_scores = D_result
            # total D_loss
            D_train_loss = D_real_loss + D_fake_loss

            # training of network
            D_train_loss.backward()
            D_optimizer.step()

            # NOTE(review): inconsistent with the generator branch below,
            # which appends `.data[0]`; on modern PyTorch both should be
            # `.item()`.
            D_losses.append(D_train_loss.data)

            # training generator ##############

            # manually setting gradients to zero before mini batches
            G.zero_grad()

            # Fresh noise batch; generator is trained against all-ones labels
            # (non-saturating objective: maximize log D(G(z))).
            noise = torch.randn((mini_batch, 100))
            out = torch.ones(mini_batch)

            # variables in pytorch can directly be accessed
            noise = Variable(noise)
            out = Variable(out)
            # noise input to generator
            G_result = G(noise)
            D_result = D(G_result)
            # comparing with ones labels
            # loss calculations due to generated data : generator's loss
            G_train_loss = F.binary_cross_entropy(D_result, out)
            # training of network
            G_train_loss.backward()
            G_optimizer.step()

            # NOTE(review): `.data[0]` is the pre-0.4 scalar accessor and
            # raises on modern PyTorch; use `.item()` there.
            G_losses.append(G_train_loss.data[0])

        print('[%d/%d]: loss_d: %.3f, loss_g: %.3f' %
              ((epoch + 1), epochs, torch.mean(torch.FloatTensor(D_losses)),
               torch.mean(torch.FloatTensor(G_losses))))

        # Save a grid of generated samples for this epoch (used for the GIF
        # below).
        p = dataset_dir + '/images/' + str(epoch + 1) + '.png'
        save_images((epoch + 1), save=True, path=p, dataset_dir=dataset_dir)
        train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
        train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))

    print("Finished training!")

    # In[ ]:

    ### showing and saving the results ###############
    loss_plots(train_hist,
               save=True,
               path=dataset_dir + '/EMNIST_GAN_train_hist.png')
    torch.save(G.state_dict(), dataset_dir + "/generator_param.pkl")
    torch.save(D.state_dict(), dataset_dir + "/discriminator_param.pkl")
    with open(dataset_dir + '/train_hist.pkl', 'wb') as f:
        pickle.dump(train_hist, f)

    # creating gif file: stitch the per-epoch sample images into one animation
    images = []
    for i in range(epochs):
        img_name = dataset_dir + '/images/' + str(i + 1) + '.png'
        images.append(imageio.imread(img_name))
    imageio.mimsave(dataset_dir + '/gif_file.gif', images, fps=5)
Example #2
0
        # NOTE(review): fragment begins mid-way through the generator update;
        # the enclosing epoch/batch loop headers are outside this view.
        G_optimizer.step()

        # NOTE(review): `.data[0]` is the pre-0.4 PyTorch scalar accessor;
        # modern PyTorch uses `.item()`.
        G_losses.append(G_train_loss.data[0])

    # End-of-epoch reporting: mean D and G loss over the epoch's batches.
    print('[%d/%d]: loss_d: %.3f, loss_g: %.3f' %
          ((epoch + 1), epochs, torch.mean(torch.FloatTensor(D_losses)),
           torch.mean(torch.FloatTensor(G_losses))))

    # Save a grid of generated samples for this epoch (consumed by the GIF
    # builder below).
    p = 'mnist_results/images/' + str(epoch + 1) + '.png'
    save_images((epoch + 1), save=True, path=p)
    train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
    train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))

print("Finished training!")

### showing and saving the results ###############
# NOTE(review): save=False here while the sibling EMNIST variant passes
# save=True despite also supplying a path — confirm which is intended.
loss_plots(train_hist,
           save=False,
           path='mnist_results/MNIST_GAN_train_hist.png')
torch.save(G.state_dict(), "mnist_results/generator_param.pkl")
torch.save(D.state_dict(), "mnist_results/discriminator_param.pkl")
with open('mnist_results/train_hist.pkl', 'wb') as f:
    pickle.dump(train_hist, f)

# creating gif file: stitch the per-epoch sample images into one animation
images = []
for i in range(epochs):
    img_name = 'mnist_results/images/' + str(i + 1) + '.png'
    images.append(imageio.imread(img_name))
imageio.mimsave('mnist_results/gif_file.gif', images, fps=5)
Example #3
0
            # NOTE(review): fragment begins inside the batch loop; the
            # enclosing scope and loop headers are outside this view.
            # `.data[0]` is the pre-0.4 PyTorch scalar accessor; modern
            # PyTorch uses `.item()`.
            G_losses.append(G_train_loss.data[0])

        # End-of-epoch reporting: mean D and G loss over the epoch's batches.
        print('[%d/%d]: loss_d: %.3f, loss_g: %.3f' %
              ((epoch + 1), epochs, torch.mean(torch.FloatTensor(D_losses)),
               torch.mean(torch.FloatTensor(G_losses))))

        # Save a grid of generated samples for this epoch (consumed by the
        # GIF builder below).
        p = dataset_dir + '/images/' + str(epoch + 1) + '.png'
        save_images((epoch + 1), save=True, path=p, dataset_dir=dataset_dir)
        train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
        train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))

    print("Finished training!")

    ### showing and saving the results ###############
    loss_plots(train_hist,
               save=True,
               path=dataset_dir + '/EMNIST_GAN_train_hist.png')
    torch.save(G.state_dict(), dataset_dir + "/generator_param.pkl")
    torch.save(D.state_dict(), dataset_dir + "/discriminator_param.pkl")
    with open(dataset_dir + '/train_hist.pkl', 'wb') as f:
        pickle.dump(train_hist, f)

    # creating gif file: stitch the per-epoch sample images into one animation
    images = []
    for i in range(epochs):
        img_name = dataset_dir + '/images/' + str(i + 1) + '.png'
        images.append(imageio.imread(img_name))
    imageio.mimsave(dataset_dir + '/gif_file.gif', images, fps=5)

# In[36]: