Example #1
    def __init__(self, args):
        # Generator architecture
        self.G = nn.Sequential(nn.Linear(100, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 1024), nn.LeakyReLU(0.2),
                               nn.Tanh())

        # Discriminator architecture
        self.D = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 1), nn.Sigmoid())

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Binary cross entropy loss and optimizer
        self.loss = nn.BCELoss()
        self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)
        self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
        self.epochs = args.epochs
        self.batch_size = args.batch_size
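
These constructors read their hyperparameters from an args object. A minimal argparse sketch that could supply them is shown below; the flag names are inferred from the attributes the snippets access (some examples read further fields such as mode, ssup, or the lr/beta/alpha values), and the defaults are placeholders rather than the original repositories' settings:

import argparse

def build_args():
    # Hypothetical CLI mirroring the attributes read by these constructors
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=25)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--channels', type=int, default=3)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--generator_iters', type=int, default=10000)
    return parser.parse_args()
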
Example #2
    def __init__(self, args):
        print("WGAN_GradientPenalty init model.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10
Example #3
    def __init__(self, args):
        print("WGAN_CP init model.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 0.00005

        self.batch_size = 64
        self.weight_cliping_limit = 0.01

        # WGAN with weight clipping uses RMSprop instead of Adam
        self.d_optimizer = torch.optim.RMSprop(self.D.parameters(),
                                               lr=self.learning_rate)
        self.g_optimizer = torch.optim.RMSprop(self.G.parameters(),
                                               lr=self.learning_rate)

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
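
The clipping limit configured above is applied inside the critic loop, before each discriminator update; a minimal sketch of that step, mirroring the clamp used by the full WGAN_CP class further down in this listing (the function name here is illustrative):

def clip_critic_weights(D, weight_cliping_limit=0.01):
    # Clamp every critic parameter into [-c, c], c = weight_cliping_limit
    for p in D.parameters():
        p.data.clamp_(-weight_cliping_limit, weight_cliping_limit)
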
Example #4
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # binary cross entropy loss and optimizer
        self.loss = nn.BCELoss()

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Using a lower learning rate than suggested by the Adam authors (lr=0.0002) and beta_1 = 0.5 instead of 0.9 works better [Radford2015]
        self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                            lr=0.0002,
                                            betas=(0.5, 0.999))
        self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                            lr=0.0002,
                                            betas=(0.5, 0.999))

        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
Example #5
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels
        self.mode = args.mode

        self.name = ('res/_mode_' + str(args.mode) + '_beta_g_' +
                     str(args.beta_g) + '_beta_d_' + str(args.beta_d) +
                     '_lr_g_' +
                     str(args.lr_g) + '_lr_d_' + str(args.lr_d) +
                     '_alpha_d_vjp_' + str(args.alpha_d_vjp) +
                     '_alpha_g_vjp_' + str(args.alpha_g_vjp) +
                     '_alpha_d_grad_' + str(args.alpha_d_grad) +
                     '_alpha_g_grad_' + str(args.alpha_g_grad))
        print(self.name)
        if not os.path.exists(self.name):
            os.makedirs(self.name)
        # binary cross entropy loss and optimizer
        self.loss = nn.BCEWithLogitsLoss()

        self.cuda = "False"
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Using a lower learning rate than suggested by the Adam authors (lr=0.0002) and beta_1 = 0.5 instead of 0.9 works better [Radford2015]
        if self.mode == 'adam':
            self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
            self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
        elif self.mode == 'adam_vjp':
            self.d_optimizer = optim.VJP_Adam(self.D.parameters(),
                                              lr=args.lr_d,
                                              betas=(args.beta_d, 0.999),
                                              alpha_vjp=args.alpha_d_vjp,
                                              alpha_grad=args.alpha_d_grad)
            self.g_optimizer = optim.VJP_Adam(self.G.parameters(),
                                              lr=args.lr_g,
                                              betas=(args.beta_g, 0.999),
                                              alpha_vjp=args.alpha_g_vjp,
                                              alpha_grad=args.alpha_g_grad)
        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
Example #6
    def __init__(self, args):
        print("init model.")
        print(args)
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels, args.ssup)
        self.C = args.channels
        self.ssup = args.ssup
        self.loss_type = args.loss

        if self.ssup:
            self.save_path = 'sslgan_gp_ssup'
        else:
            self.save_path = 'sslgan_gp'

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10
        self.weight_rotation_loss_d = 1.0
        self.weight_rotation_loss_g = 0.5
        self.print_iter = 50
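
The two rotation-loss weights above belong to the self-supervised variant, where the discriminator additionally classifies which of four rotations each image has undergone. A standalone sketch of that augmentation, built with the same transpose/flip construction the SSLGAN training loop below uses (the function name is illustrative):

import torch

def rotate_batch_4way(x):
    # Stack the batch with its three transposed/flipped copies along dim 0;
    # the four chunks correspond to rotation labels 0..3 in the loop below.
    x_90 = x.transpose(2, 3)
    x_180 = x.flip(2, 3)
    x_270 = x.transpose(2, 3).flip(2, 3)
    return torch.cat((x, x_90, x_180, x_270), 0)
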
Example #7
class WGAN_GP(object):
    def __init__(self, args):
        print("WGAN_GradientPenalty init model.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10

    def check_cuda(self, cuda_flag=False):
        self.cuda = False
        self.cuda_index = 0
        if cuda_flag:
            self.cuda = True
            self.D.cuda(self.cuda_index)
            self.G.cuda(self.cuda_index)
            print("Cuda enabled flag: {}".format(self.cuda))

    def train(self, train_loader):
        self.t_begin = t.time()
        self.file = open("inception_score_graph.txt", "w")

        # Now batches are callable self.data.next()
        self.data = self.get_infinite_batches(train_loader)

        #one = torch.FloatTensor(1).cuda()
        #mone = one * -1

        for g_iter in range(self.generator_iters):

            # Requires grad, Generator requires_grad = False
            for p in self.D.parameters():
                p.requires_grad = True

            d_loss_real = 0
            d_loss_fake = 0
            Wasserstein_D = 0
            d_loss = 0
            images = None
            # Train the Discriminator forward-loss-backward-update self.critic_iter times for every single Generator forward-loss-backward-update
            for d_iter in range(self.critic_iter):
                self.D.zero_grad()

                images = self.data.__next__()
                # Check for batch to have full batch_size
                if (images.size()[0] != self.batch_size):
                    continue

                z = torch.rand((self.batch_size, 100, 1, 1))

                if self.cuda:
                    images, z = Variable(images.cuda(
                        self.cuda_index)), Variable(z.cuda(self.cuda_index))
                else:
                    images, z = Variable(images), Variable(z)

                # Train discriminator
                # WGAN - Training discriminator more iterations than generator
                # Train with real images
                d_loss_real = self.D(images).mean()
                one = torch.ones(d_loss_real.shape,
                                 dtype=d_loss_real.dtype,
                                 device=d_loss_real.device)
                mone = one * -1
                d_loss_real.backward(mone)

                # Train with fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1,
                                             1)).cuda(self.cuda_index)
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                d_loss_fake = self.D(fake_images).mean()
                d_loss_fake.backward(one)

                # Train with gradient penalty
                gradient_penalty = self.calculate_gradient_penalty(
                    images.data, fake_images.data)
                gradient_penalty.backward()

                d_loss = d_loss_fake - d_loss_real + gradient_penalty
                Wasserstein_D = d_loss_real - d_loss_fake
                self.d_optimizer.step()

            # Generator update
            for p in self.D.parameters():
                p.requires_grad = False  # to avoid computation

            self.G.zero_grad()
            # train generator
            # compute loss with fake images
            z = Variable(torch.randn(self.batch_size, 100, 1,
                                     1)).cuda(self.cuda_index)
            fake_images = self.G(z)
            g_loss = self.D(fake_images)
            g_loss = g_loss.mean()
            g_loss.backward(mone)
            g_cost = -g_loss
            self.g_optimizer.step()

            # Saving model and sampling images every 1000th generator iterations
            if (g_iter) % 1000 == 0:
                self.save_model()
                # # Workaround because graphic card memory can't store more than 830 examples in memory for generating image
                # # Therefore doing loop and generating 800 examples and stacking into list of samples to get 8000 generated images
                # # This way Inception score is more correct since there are different generated examples from every class of Inception model
                # sample_list = []
                # for i in range(125):
                #     samples  = self.data.__next__()
                # #     z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                # #     samples = self.G(z)
                #     sample_list.append(samples.data.cpu().numpy())
                # #
                # # # Flattening list of list into one list
                # new_sample_list = list(chain.from_iterable(sample_list))
                # print("Calculating Inception Score over 8k generated images")
                # # # Feeding list of numpy arrays
                # inception_score = get_inception_score(new_sample_list, cuda=True, batch_size=32,
                #                                       resize=True, splits=10)

                if not os.path.exists('training_result_images/'):
                    os.makedirs('training_result_images/')

                # Denormalize images and save them in grid 8x8
                z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                samples = self.G(z)
                samples = samples.mul(0.5).add(0.5)
                samples = samples.data.cpu()[:64]
                grid = utils.make_grid(samples)
                utils.save_image(
                    grid,
                    'training_result_images/img_generatori_iter_{}.png'.format(
                        str(g_iter).zfill(3)))

                # Testing
                time = t.time() - self.t_begin
                #print("Real Inception score: {}".format(inception_score))
                print("Generator iter: {}".format(g_iter))
                print("Time {}".format(time))

                # Write to file inception_score, gen_iters, time
                #output = str(g_iter) + " " + str(time) + " " + str(inception_score[0]) + "\n"
                #self.file.write(output)

                # ============ TensorBoard logging ============#
                # (1) Log the scalar values
                info = {
                    'Wasserstein distance': Wasserstein_D,
                    'Loss D': d_loss,
                    'Loss G': g_cost,
                    'Loss D Real': d_loss_real,
                    'Loss D Fake': d_loss_fake
                }

                for tag, value in info.items():
                    self.logger.scalar_summary(tag, value, g_iter + 1)

                # (3) Log the images
                info = {
                    'real_images': self.real_images(images,
                                                    self.number_of_images),
                    'generated_images':
                    self.generate_img(z, self.number_of_images)
                }

                for tag, images in info.items():
                    self.logger.image_summary(tag, images, g_iter + 1)

        self.t_end = t.time()
        print('Time of training-{}'.format((self.t_end - self.t_begin)))
        #self.file.close()

        # Save the trained parameters
        self.save_model()

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        z = Variable(torch.randn(self.batch_size, 100, 1,
                                 1)).cuda(self.cuda_index)
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
        utils.save_image(grid, 'dgan_model_image.png')

    def calculate_gradient_penalty(self, real_images, fake_images):
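        # This implements the WGAN-GP penalty of Gulrajani et al. (2017):
        #   lambda_term * E[(||grad_xhat D(xhat)||_2 - 1)^2],
        # where xhat = eta * x_real + (1 - eta) * x_fake with eta ~ U(0, 1)
        # and lambda_term = 10 in this class.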
        etatmp = torch.FloatTensor(self.batch_size, 1, 1, 1).uniform_(0, 1)
        eta = etatmp.expand(self.batch_size, real_images.size(1),
                            real_images.size(2), real_images.size(3))
        if self.cuda:
            eta = eta.cuda(self.cuda_index)

        interpolated = eta * real_images + ((1 - eta) * fake_images)

        if self.cuda:
            interpolated = interpolated.cuda(self.cuda_index)

        # define it so gradients can be calculated with respect to it
        interpolated = Variable(interpolated, requires_grad=True)

        # calculate probability of interpolated examples
        prob_interpolated = self.D(interpolated)

        # calculate gradients of probabilities with respect to examples
        gradients = autograd.grad(outputs=prob_interpolated,
                                  inputs=interpolated,
                                  grad_outputs=torch.ones(
                                      prob_interpolated.size(),
                                      device=prob_interpolated.device),
                                  create_graph=True,
                                  retain_graph=True)[0]

        grad_penalty = (
            (gradients.norm(2, dim=1) - 1)**2).mean() * self.lambda_term
        return grad_penalty

    def real_images(self, images, number_of_images):
        if (self.C == 3):
            #return self.to_np(images.view(-1, self.C, 32, 32)[:self.number_of_images])
            return images.detach().view(-1, self.C, 32,
                                        32)[:self.number_of_images]
        else:
            #return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])
            return images.detach().view(-1, 32, 32)[:self.number_of_images]

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            if self.C == 3:
                generated_images.append(sample.reshape(self.C, 32, 32))
            else:
                generated_images.append(sample.reshape(32, 32))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self):
        torch.save(self.G.state_dict(), './generator.pkl')
        torch.save(self.D.state_dict(), './discriminator.pkl')
        print('Models saved to ./generator.pkl & ./discriminator.pkl')

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))

    def get_infinite_batches(self, data_loader):
        while True:
            for i, (images, _) in enumerate(data_loader):
                yield images

    def generate_latent_walk(self, number):
        if not os.path.exists('interpolated_images/'):
            os.makedirs('interpolated_images/')

        number_int = 10
        # Interpolate between two noise vectors (z1, z2).
        z_intp = torch.FloatTensor(1, 100, 1, 1)
        z1 = torch.randn(1, 100, 1, 1)
        z2 = torch.randn(1, 100, 1, 1)
        if self.cuda:
            z_intp = z_intp.cuda()
            z1 = z1.cuda()
            z2 = z2.cuda()

        z_intp = Variable(z_intp)
        images = []
        alpha = 1.0 / float(number_int + 1)
        print(alpha)
        for i in range(1, number_int + 1):
            z_intp.data = z1 * alpha + z2 * (1.0 - alpha)
            alpha += 1.0 / float(number_int + 1)
            fake_im = self.G(z_intp)
            fake_im = fake_im.mul(0.5).add(0.5)  #denormalize
            images.append(fake_im.view(self.C, 32, 32).data.cpu())

        grid = utils.make_grid(images, nrow=number_int)
        utils.save_image(
            grid, 'interpolated_images/interpolated_{}.png'.format(
                str(number).zfill(3)))
        print("Saved interpolated images.")
Example #8
class DCGAN_MODEL(object):
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # binary cross entropy loss and optimizer
        self.loss = nn.BCELoss()

        self.cuda = "False"
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Using a lower learning rate than suggested by the Adam authors (lr=0.0002) and beta_1 = 0.5 instead of 0.9 works better [Radford2015]
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), lr=0.0002, betas=(0.5, 0.999))
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), lr=0.0002, betas=(0.5, 0.999))

        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10

    # cuda support
    def check_cuda(self, cuda_flag=False):
        if cuda_flag:
            self.cuda = True
            self.D.cuda(self.cuda_index)
            self.G.cuda(self.cuda_index)
            self.loss = nn.BCELoss().cuda(self.cuda_index)
            print("Cuda enabled flag: ")
            print(self.cuda)


    def train(self, train_loader):
        self.t_begin = t.time()
        generator_iter = 0
        #self.file = open("inception_score_graph.txt", "w")

        for epoch in range(self.epochs):
            self.epoch_start_time = t.time()

            for i, (images, _) in enumerate(train_loader):
                # Stop before the last, incomplete batch
                if i == train_loader.dataset.__len__() // self.batch_size:
                    break

                z = torch.rand((self.batch_size, 100, 1, 1))
                real_labels = torch.ones(self.batch_size)
                fake_labels = torch.zeros(self.batch_size)

                if self.cuda:
                    images, z = Variable(images).cuda(self.cuda_index), Variable(z).cuda(self.cuda_index)
                    real_labels, fake_labels = Variable(real_labels).cuda(self.cuda_index), Variable(fake_labels).cuda(self.cuda_index)
                else:
                    images, z = Variable(images), Variable(z)
                    real_labels, fake_labels = Variable(real_labels), Variable(fake_labels)


                # Train discriminator
                # Compute BCE_Loss using real images
                outputs = self.D(images)
                d_loss_real = self.loss(outputs, real_labels)
                real_score = outputs

                # Compute BCE Loss using fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                outputs = self.D(fake_images)
                d_loss_fake = self.loss(outputs, fake_labels)
                fake_score = outputs

                # Optimize discriminator
                d_loss = d_loss_real + d_loss_fake
                self.D.zero_grad()
                d_loss.backward()
                self.d_optimizer.step()

                # Train generator
                # Compute loss with fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                outputs = self.D(fake_images)
                g_loss = self.loss(outputs, real_labels)

                # Optimize generator
                self.D.zero_grad()
                self.G.zero_grad()
                g_loss.backward()
                self.g_optimizer.step()
                generator_iter += 1


                if generator_iter % 1000 == 0:
                    # Workaround because graphic card memory can't store more than 800+ examples in memory for generating image
                    # Therefore doing loop and generating 800 examples and stacking into list of samples to get 8000 generated images
                    # This way Inception score is more correct since there are different generated examples from every class of Inception model
                    # sample_list = []
                    # for i in range(10):
                    #     z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                    #     samples = self.G(z)
                    #     sample_list.append(samples.data.cpu().numpy())
                    #
                    # # Flattening list of lists into one list of numpy arrays
                    # new_sample_list = list(chain.from_iterable(sample_list))
                    # print("Calculating Inception Score over 8k generated images")
                    # # Feeding list of numpy arrays
                    # inception_score = get_inception_score(new_sample_list, cuda=True, batch_size=32,
                    #                                       resize=True, splits=10)
                    print('Epoch-{}'.format(epoch + 1))
                    self.save_model(generator_iter)

                    if not os.path.exists('dcgan_conv_training_result_images/'):
                        os.makedirs('dcgan_conv_training_result_images/')

                    # Denormalize images and save them in grid 8x8
                    z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                    samples = self.G(z)
                    samples = samples.mul(0.5).add(0.5)
                    samples = samples.data.cpu()[:64]
                    grid = utils.make_grid(samples)
                    utils.save_image(grid, 'dcgan_conv_training_result_images/img_generatori_iter_{}.png'.format(str(generator_iter).zfill(3)))

                    time = t.time() - self.t_begin
                    #print("Inception score: {}".format(inception_score))
                    print("Generator iter: {}".format(generator_iter))
                    print("Time {}".format(time))

                    # Write to file inception_score, gen_iters, time
                    #output = str(generator_iter) + " " + str(time) + " " + str(inception_score[0]) + "\n"
                    #self.file.write(output)


                if ((i + 1) % 100) == 0:
                    print("Epoch: [%2d] [%4d/%4d] D_loss: %.8f, G_loss: %.8f" %
                          ((epoch + 1), (i + 1), train_loader.dataset.__len__() // self.batch_size, d_loss.item(), g_loss.item()))

                    z = Variable(torch.randn(self.batch_size, 100, 1, 1).cuda(self.cuda_index))

                    # TensorBoard logging
                    # Log the scalar values
                    info = {
                        'd_loss': d_loss.item(),
                        'g_loss': g_loss.item()
                    }

                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, generator_iter)

                    # Log values and gradients of the parameters
                    for tag, value in self.D.named_parameters():
                        tag = tag.replace('.', '/')
                        self.logger.histo_summary(tag, self.to_np(value), generator_iter)
                        self.logger.histo_summary(tag + '/grad', self.to_np(value.grad), generator_iter)

                    # Log the images while training
                    info = {
                        'real_images': self.real_images(images, self.number_of_images),
                        'generated_images': self.generate_img(z, self.number_of_images)
                    }

                    for tag, images in info.items():
                        self.logger.image_summary(tag, images, generator_iter)


        self.t_end = t.time()
        print('Time of training-{}'.format((self.t_end - self.t_begin)))
        #self.file.close()

        # Save the trained parameters
        self.save_model(generator_iter)

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
        utils.save_image(grid, 'dgan_conv_model_image.png')

    def real_images(self, images, number_of_images):
        if (self.C == 3):
            return self.to_np(images.view(-1, self.C, 32, 32)[:self.number_of_images])
        else:
            return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            if self.C == 3:
                generated_images.append(sample.reshape(self.C, 32, 32))
            else:
                generated_images.append(sample.reshape(32, 32))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self, g_iter):
        # Make sure the checkpoint directory exists before saving
        os.makedirs('./checkpoint', exist_ok=True)
        torch.save(self.G.state_dict(), './checkpoint' + "/{}_DCGAN_CONV_G.pth".format(g_iter + 1))
        torch.save(self.D.state_dict(), './checkpoint' + "/{}_DCGAN_CONV_D.pth".format(g_iter + 1))
        print('Models saved to ./checkpoint/{}_DCGAN_CONV_G.pth & ./checkpoint/{}_DCGAN_CONV_D.pth'.format(g_iter + 1, g_iter + 1))

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))

    def generate_latent_walk(self, number):
        if not os.path.exists('interpolated_images/'):
            os.makedirs('interpolated_images/')

        # Interpolate between two noise vectors (z1, z2) with number_int steps between them
        number_int = 10
        z_intp = torch.FloatTensor(1, 100, 1, 1)
        z1 = torch.randn(1, 100, 1, 1)
        z2 = torch.randn(1, 100, 1, 1)
        if self.cuda:
            z_intp = z_intp.cuda()
            z1 = z1.cuda()
            z2 = z2.cuda()

        z_intp = Variable(z_intp)
        images = []
        alpha = 1.0 / float(number_int + 1)
        print(alpha)
        for i in range(1, number_int + 1):
            z_intp.data = z1 * alpha + z2 * (1.0 - alpha)
            alpha += 1.0 / float(number_int + 1)
            fake_im = self.G(z_intp)
            fake_im = fake_im.mul(0.5).add(0.5)  # denormalize
            images.append(fake_im.view(self.C, 32, 32).data.cpu())

        grid = utils.make_grid(images, nrow=number_int)
        utils.save_image(grid, 'interpolated_images/interpolated_{}.png'.format(str(number).zfill(3)))
        print("Saved interpolated images to interpolated_images/interpolated_{}.".format(str(number).zfill(3)))
Example #9
class GAN(object):
    def __init__(self, args):
        # Generator architecture
        self.G = nn.Sequential(nn.Linear(100, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 1024), nn.LeakyReLU(0.2),
                               nn.Tanh())

        # Discriminator architecture
        self.D = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 1), nn.Sigmoid())

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Binary cross entropy loss and optimizer
        self.loss = nn.BCELoss()
        self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)
        self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
        self.epochs = args.epochs
        self.batch_size = args.batch_size

    # Cuda support
    def check_cuda(self, cuda_flag=False):
        if cuda_flag:
            self.cuda_index = 0
            self.cuda = True
            self.D.cuda(self.cuda_index)
            self.G.cuda(self.cuda_index)
            self.loss = nn.BCELoss().cuda(self.cuda_index)
            print("Cuda enabled flag: ")
            print(self.cuda)

    def train(self, train_loader):
        self.t_begin = time.time()
        generator_iter = 0

        for epoch in range(self.epochs + 1):
            for i, (images, _) in enumerate(train_loader):
                # Stop before the last, incomplete batch
                if i == train_loader.dataset.__len__() // self.batch_size:
                    break

                # Flatten image from 1x32x32 to a 1024-dim vector
                images = images.view(self.batch_size, -1)
                z = torch.rand((self.batch_size, 100))

                if self.cuda:
                    real_labels = Variable(torch.ones(self.batch_size)).cuda(
                        self.cuda_index)
                    fake_labels = Variable(torch.zeros(self.batch_size)).cuda(
                        self.cuda_index)
                    images, z = Variable(images.cuda(
                        self.cuda_index)), Variable(z.cuda(self.cuda_index))
                else:
                    real_labels = Variable(torch.ones(self.batch_size))
                    fake_labels = Variable(torch.zeros(self.batch_size))
                    images, z = Variable(images), Variable(z)

                # Train discriminator
                # compute BCE_Loss using real images where BCE_Loss(x, y): - y * log(D(x)) - (1-y) * log(1 - D(x))
                # [Training discriminator = Maximizing discriminator being correct]
                outputs = self.D(images)
                d_loss_real = self.loss(outputs.flatten(), real_labels)
                real_score = outputs

                # Compute BCELoss using fake images
                fake_images = self.G(z)
                outputs = self.D(fake_images)
                d_loss_fake = self.loss(outputs.flatten(), fake_labels)
                fake_score = outputs

                # Optimize discriminator
                d_loss = d_loss_real + d_loss_fake
                self.D.zero_grad()
                d_loss.backward()
                self.d_optimizer.step()

                # Train generator
                if self.cuda:
                    z = Variable(
                        torch.randn(self.batch_size,
                                    100).cuda(self.cuda_index))
                else:
                    z = Variable(torch.randn(self.batch_size, 100))
                fake_images = self.G(z)
                outputs = self.D(fake_images)

                # We train G to maximize log(D(G(z))) [maximizing the likelihood of the discriminator being wrong]
                # instead of minimizing log(1 - D(G(z))) [minimizing the likelihood of the discriminator being correct]
                # From the original GAN paper [https://arxiv.org/pdf/1406.2661.pdf]
                g_loss = self.loss(outputs.flatten(), real_labels)

                # Optimize generator
                self.D.zero_grad()
                self.G.zero_grad()
                g_loss.backward()
                self.g_optimizer.step()
                generator_iter += 1

                if ((i + 1) % 100) == 0:
                    print("Epoch: [%2d] [%4d/%4d] D_loss: %.8f, G_loss: %.8f" %
                          ((epoch + 1),
                           (i + 1), train_loader.dataset.__len__() //
                           self.batch_size, d_loss.data, g_loss.data))

                    if self.cuda:
                        z = Variable(
                            torch.randn(self.batch_size,
                                        100).cuda(self.cuda_index))
                    else:
                        z = Variable(torch.randn(self.batch_size, 100))

                    # ============ TensorBoard logging ============#
                    # (1) Log the scalar values
                    info = {'d_loss': d_loss.data, 'g_loss': g_loss.data}

                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, i + 1)

                    # (2) Log values and gradients of the parameters (histogram)
                    for tag, value in self.D.named_parameters():
                        tag = tag.replace('.', '/')
                        self.logger.histo_summary(tag, self.to_np(value),
                                                  i + 1)
                        self.logger.histo_summary(tag + '/grad',
                                                  self.to_np(value.grad),
                                                  i + 1)

                    # (3) Log the images
                    info = {
                        'real_images':
                        self.to_np(
                            images.view(-1, 32, 32)[:self.number_of_images]),
                        'generated_images':
                        self.generate_img(z, self.number_of_images)
                    }

                    for tag, images in info.items():
                        self.logger.image_summary(tag, images, i + 1)

                if generator_iter % 1000 == 0:
                    print('Generator iter-{}'.format(generator_iter))
                    self.save_model()

                    if not os.path.exists('training_result_images/'):
                        os.makedirs('training_result_images/')

                    # Denormalize images and save them in grid 8x8
                    if self.cuda:
                        z = Variable(
                            torch.randn(self.batch_size,
                                        100).cuda(self.cuda_index))
                    else:
                        z = Variable(torch.randn(self.batch_size, 100))
                    samples = self.G(z)
                    samples = samples.mul(0.5).add(0.5)
                    samples = samples.data.cpu()
                    grid = utils.make_grid(samples)
                    utils.save_image(
                        grid,
                        'training_result_images/gan_image_iter_{}.png'.format(
                            str(generator_iter).zfill(3)))

        self.t_end = time.time()
        print('Time of training-{}'.format((self.t_end - self.t_begin)))
        # Save the trained parameters
        self.save_model()

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        if self.cuda:
            z = Variable(
                torch.randn(self.batch_size, 100).cuda(self.cuda_index))
        else:
            z = Variable(torch.randn(self.batch_size, 100))
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'gan_model_image.png'.")
        utils.save_image(grid, 'gan_model_image.png')

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            generated_images.append(sample.reshape(32, 32))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self):
        torch.save(self.G.state_dict(), './generator.pkl')
        torch.save(self.D.state_dict(), './discriminator.pkl')
        print('Models saved to ./generator.pkl & ./discriminator.pkl')

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))
Example #10
class WGAN_CP(object):
    def __init__(self, args):
        print("WGAN_CP init models.")
        channel = 3
        self.G = Generator(channel)
        self.D = Discriminator(channel)
        self.C = channel

        # check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 0.00005

        self.batch_size = 128
        self.weight_cliping_limit = 0.01

        # WGAN with weight clipping uses RMSprop instead of Adam
        self.d_optimizer = torch.optim.RMSprop(self.D.parameters(),
                                               lr=self.learning_rate)
        self.g_optimizer = torch.optim.RMSprop(self.G.parameters(),
                                               lr=self.learning_rate)

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5

    def check_cuda(self, cuda_flag=False):
        self.cuda = False
        self.cuda_index = 0
        if cuda_flag:
            self.cuda = True
            self.D.cuda()
            self.G.cuda()
            print("Cuda enabled flag: {}".format(self.cuda))

    def train(self, train_loader):
        self.t_begin = t.time()
        #self.file = open("inception_score_graph.txt", "w")

        # Now batches are callable self.data.next()
        self.data = self.get_infinite_batches(train_loader)

        one = torch.FloatTensor([1])
        mone = one * -1

        if self.cuda:
            one = one.cuda()
            mone = mone.cuda()

        for g_iter in range(self.generator_iters):

            print("generator_iters(g_iter): ", g_iter)
            # Requires grad, Generator requires_grad = False
            for p in self.D.parameters():
                p.requires_grad = True

            # Train the Discriminator forward-loss-backward-update self.critic_iter times for every single Generator forward-loss-backward-update
            for d_iter in range(self.critic_iter):
                print("critic_iter(d_iter): ", d_iter)
                self.D.zero_grad()

                # Clamp parameters to a range [-c, c], c=self.weight_cliping_limit
                for p in self.D.parameters():
                    p.data.clamp_(-self.weight_cliping_limit,
                                  self.weight_cliping_limit)

                images = self.data.__next__()
                # Check for batch to have full batch_size
                if (images.size()[0] != self.batch_size):
                    continue

                z = torch.rand((self.batch_size, 100, 1, 1))

                if self.cuda:
                    images, z = Variable(images.cuda()), Variable(z.cuda())
                else:
                    images, z = Variable(images), Variable(z)

                # Train discriminator
                # WGAN - Training discriminator more iterations than generator
                # Train with real images
                d_loss_real = self.D(images)
                d_loss_real = d_loss_real.mean(0).view(1)
                d_loss_real.backward(one)

                # Train with fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1,
                                             1)).cuda()
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                d_loss_fake = self.D(fake_images)
                d_loss_fake = d_loss_fake.mean(0).view(1)
                d_loss_fake.backward(mone)

                d_loss = d_loss_fake - d_loss_real
                Wasserstein_D = d_loss_real - d_loss_fake
                self.d_optimizer.step()

            # Generator update
            for p in self.D.parameters():
                p.requires_grad = False  # to avoid computation

            self.G.zero_grad()

            # Train generator
            # Compute loss with fake images
            z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda()
            fake_images = self.G(z)
            g_loss = self.D(fake_images)
            g_loss = g_loss.mean().mean(0).view(1)
            g_loss.backward(one)
            g_cost = -g_loss
            self.g_optimizer.step()

            # Saving models and sampling images every 1000th generator iterations
            if (g_iter) % 1000 == 0:
                self.save_model()
                # Workaround because graphic card memory can't store more than 830 examples in memory for generating image
                # Therefore doing loop and generating 800 examples and stacking into list of samples to get 8000 generated images
                # This way Inception score is more correct since there are different generated examples from every class of Inception models
                # sample_list = []
                # for i in range(10):
                #     z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                #     samples = self.G(z)
                #     sample_list.append(samples.data.cpu().numpy())
                #
                # # Flattening list of list into one list
                # new_sample_list = list(chain.from_iterable(sample_list))
                # print("Calculating Inception Score over 8k generated images")
                # # Feeding list of numpy arrays
                # inception_score = get_inception_score(new_sample_list, cuda=True, batch_size=32,
                #                                       resize=True, splits=10)

                if not os.path.exists('training_result_images/'):
                    os.makedirs('training_result_images/')

                # Denormalize images and save them in grid 8x8
                z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
                samples = self.G(z)
                samples = samples.mul(0.5).add(0.5)
                samples = samples.data.cpu()[:64]
                grid = utils.make_grid(samples)
                utils.save_image(
                    grid,
                    'training_result_images/img_generatori_iter_{}.png'.format(
                        str(g_iter).zfill(3)))

                # Testing
                time = t.time() - self.t_begin
                #print("Inception score: {}".format(inception_score))
                print("Generator iter: {}".format(g_iter))
                print("Time {}".format(time))

                # Write to file inception_score, gen_iters, time
                #output = str(g_iter) + " " + str(time) + " " + str(inception_score[0]) + "\n"
                #self.file.write(output)

                # ============ TensorBoard logging ============#
                # (1) Log the scalar values
                info = {
                    'Wasserstein distance': Wasserstein_D.data[0],
                    'Loss D': d_loss.data[0],
                    'Loss G': g_cost.data[0],
                    'Loss D Real': d_loss_real.data[0],
                    'Loss D Fake': d_loss_fake.data[0]
                }

                for tag, value in info.items():
                    self.logger.scalar_summary(tag, value, g_iter + 1)

                # (3) Log the images
                info = {
                    'real_images': self.real_images(images,
                                                    self.number_of_images),
                    'generated_images':
                    self.generate_img(z, self.number_of_images)
                }

                for tag, images in info.items():
                    self.logger.image_summary(tag, images, g_iter + 1)

        self.t_end = t.time()
        print('Time of training-{}'.format((self.t_end - self.t_begin)))
        #self.file.close()

        # Save the trained parameters
        self.save_model()

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda()
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
        utils.save_image(grid, 'wgan_model_image.png')

    def real_images(self, images, number_of_images):
        if (self.C == 3):
            return self.to_np(
                images.view(-1, self.C, 32, 32)[:self.number_of_images])
        else:
            return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            if self.C == 3:
                generated_images.append(sample.reshape(self.C, 64, 64))
            else:
                generated_images.append(sample.reshape(64, 64))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self):
        torch.save(self.G.state_dict(), './generator.pkl')
        torch.save(self.D.state_dict(), './discriminator.pkl')
        print('Models saved to ./generator.pkl & ./discriminator.pkl')

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))

    def get_infinite_batches(self, data_loader):
        while True:
            for i, (images, _) in enumerate(data_loader):
                yield images

    def generate_latent_walk(self, number):
        if not os.path.exists('interpolated_images/'):
            os.makedirs('interpolated_images/')

        number_int = 10
        # Interpolate between two noise vectors (z1, z2).
        z_intp = torch.FloatTensor(1, 100, 1, 1)
        z1 = torch.randn(1, 100, 1, 1)
        z2 = torch.randn(1, 100, 1, 1)
        if self.cuda:
            z_intp = z_intp.cuda()
            z1 = z1.cuda()
            z2 = z2.cuda()

        z_intp = Variable(z_intp)
        images = []
        alpha = 1.0 / float(number_int + 1)
        print(alpha)
        for i in range(1, number_int + 1):
            z_intp.data = z1 * alpha + z2 * (1.0 - alpha)
            alpha += 1.0 / float(number_int + 1)
            fake_im = self.G(z_intp)
            fake_im = fake_im.mul(0.5).add(0.5)  #denormalize
            images.append(fake_im.view(self.C, 32, 32).data.cpu())

        grid = utils.make_grid(images, nrow=number_int)
        utils.save_image(
            grid, 'interpolated_images/interpolated_{}.png'.format(
                str(number).zfill(3)))
        print("Saved interpolated images.")
Example #11
class SSLGAN_SN(object):
    def __init__(self, args):
        print("init model.")
        print(args)
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels, args.ssup)
        self.C = args.channels
        self.ssup = args.ssup
        self.loss_type = args.loss
        
        if self.ssup:
            self.save_path = 'sslgan_sn_ssup'
        else:
            self.save_path = 'sslgan_sn'

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(), lr=self.learning_rate, betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(), lr=self.learning_rate, betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10
        self.weight_rotation_loss_d = 1.0
        self.weight_rotation_loss_g = 0.5
        self.print_iter = 50

    def get_torch_variable(self, arg):
        if self.cuda:
            return Variable(arg).cuda(self.cuda_index)
        else:
            return Variable(arg)

    def check_cuda(self, cuda_flag=False):
        print(cuda_flag)
        if cuda_flag:
            self.cuda_index = 0
            self.cuda = True
            self.D.cuda(self.cuda_index)
            self.G.cuda(self.cuda_index)
            print("Cuda enabled flag: {}".format(self.cuda))
        else:
            self.cuda = False


    def train(self, train_loader):
        self.t_begin = t.time()

        if not os.path.exists('{}'.format(self.save_path)):
            os.makedirs('{}'.format(self.save_path))

        # Now batches are callable self.data.next()
        self.data = self.get_infinite_batches(train_loader)

        self.frozen_latent = self.get_torch_variable(torch.randn(1000, 100, 1, 1))
        self.forzen_images = self.data.__next__()
        self.forzen_images = self.get_torch_variable(self.forzen_images)

        one = torch.tensor(1, dtype=torch.float)
        mone = one * -1
        if self.cuda:
            one = one.cuda(self.cuda_index)
            mone = mone.cuda(self.cuda_index)

        for g_iter in range(self.generator_iters):
            # Requires grad, Generator requires_grad = False
            for p in self.D.parameters():
                p.requires_grad = True

            d_loss_real = 0
            d_loss_fake = 0
            Wasserstein_D = 0
            # Train the Discriminator forward-loss-backward-update self.critic_iter times for every single Generator forward-loss-backward-update
            for d_iter in range(self.critic_iter):
                self.D.zero_grad()

                images = self.data.__next__()
                # Check for batch to have full batch_size
                if (images.size()[0] != self.batch_size):
                    continue

                z = torch.rand((self.batch_size, 100, 1, 1))
                images, z = self.get_torch_variable(images), self.get_torch_variable(z) 
                # fake_images = self.G(z)
                # Train discriminator
                # WGAN - Training discriminator more iterations than generator
                if self.ssup:
                    # rot real image
                    x = images
                    x_90 = x.transpose(2,3)
                    x_180 = x.flip(2,3)
                    x_270 = x.transpose(2,3).flip(2,3)
                    images = torch.cat((x,x_90,x_180,x_270),0)

                    d_loss_real, _, __, d_real_rot_logits, d_real_rot_prob = self.D(images)
                    d_loss_real = torch.mean(d_loss_real[:self.batch_size])
                    d_loss_real.backward(one, retain_graph=True)

                    z = self.get_torch_variable(torch.randn(self.batch_size, 100, 1, 1))
                    fake_images = self.G(z)

                    # rot fake image
                    x = fake_images
                    x_90 = x.transpose(2,3)
                    x_180 = x.flip(2,3)
                    x_270 = x.transpose(2,3).flip(2,3)
                    fake_images = torch.cat((x, x_90, x_180, x_270),0)

                    _, d_loss_fake, __, g_fake_rot_logits, g_fake_rot_prob = self.D(fake_images)
                    d_loss_fake = torch.mean(d_loss_fake[:self.batch_size])
                    d_loss_fake.backward(one)

                    # Rotation labels: class k for the k-th quarter of the concatenated batch (0/90/180/270 degrees)
                    rot_labels = torch.arange(4).repeat_interleave(self.batch_size)
                    if self.cuda:
                        rot_labels = rot_labels.cuda(self.cuda_index)

                    rot_labels = F.one_hot(rot_labels.to(torch.int64), 4).float()
                    d_real_class_loss = torch.mean(F.binary_cross_entropy_with_logits(input = d_real_rot_logits, target = rot_labels)) * self.weight_rotation_loss_d
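                    # (One-hot targets with BCE-with-logits realize the 4-way rotation classification;
                    # a plain cross-entropy over the integer labels would be the more conventional choice.)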
                    d_real_class_loss.backward(one)

                    d_loss = d_loss_real + d_loss_fake + d_real_class_loss

                    self.d_optimizer.step()
                    if g_iter % self.print_iter == 0:
                        print(f'  Discriminator iteration: {d_iter}/{self.critic_iter}, loss: {d_loss}, loss_rot: {d_real_class_loss}')

                else:
                    d_loss_real, _, __ = self.D(images)
                    d_loss_real = torch.mean(d_loss_real)
                    d_loss_real.backward(one)

                    z = self.get_torch_variable(torch.randn(self.batch_size, 100, 1, 1))
                    fake_images = self.G(z)
                    _, d_loss_fake, __ = self.D(fake_images)
                    d_loss_fake = torch.mean(d_loss_fake)
                    d_loss_fake.backward(one)

                    d_loss = d_loss_real + d_loss_fake

                    self.d_optimizer.step()
                    if g_iter % self.print_iter == 0:
                        print(f'  Discriminator iteration: {d_iter}/{self.critic_iter}, d_loss: {d_loss}')

            # Generator update
            for p in self.D.parameters():
                p.requires_grad = False  # to avoid computation

            self.G.zero_grad()

            # train generator
            z = self.get_torch_variable(torch.randn(self.batch_size, 100, 1, 1))
            fake_images = self.G(z)

            if self.ssup:
                # rot fake image
                x = fake_images
                x_90 = x.transpose(2,3)
                x_180 = x.flip(2,3)
                x_270 = x.transpose(2,3).flip(2,3)
                fake_images = torch.cat((x, x_90, x_180, x_270),0)

                _, __, g_loss, g_fake_rot_logits, g_fake_rot_prob = self.D(fake_images)
                g_loss = g_loss[:self.batch_size].mean()
                g_loss.backward(mone, retain_graph=True)
                g_loss = -g_loss
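                # backward(mone) flips the gradient sign so the optimizer step ascends this term;
                # the explicit negation above recovers the value reported and combined with the rotation loss below.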

                # Same rotation labels as in the critic update
                rot_labels = torch.arange(4).repeat_interleave(self.batch_size)
                if self.cuda:
                    rot_labels = rot_labels.cuda(self.cuda_index)
                
                rot_labels = F.one_hot(rot_labels.to(torch.int64), 4).float()
                g_fake_class_loss = torch.mean(F.binary_cross_entropy_with_logits(input = g_fake_rot_logits, target = rot_labels)) * self.weight_rotation_loss_g
                g_fake_class_loss.backward(one)

                g_loss += g_fake_class_loss
                self.g_optimizer.step()
                if g_iter % self.print_iter == 0:
                    print(f'Generator iteration: {g_iter}/{self.generator_iters}, g_loss: {g_loss}, rot_loss: {g_fake_class_loss}')
                
            else:
                _, __, g_loss = self.D(fake_images)
                g_loss = g_loss.mean()
                g_loss.backward(mone)
                g_cost = -g_loss
                self.g_optimizer.step()
                if g_iter % self.print_iter == 0:
                    print(f'Generator iteration: {g_iter}/{self.generator_iters}, g_loss: {g_loss}')

            # Save the model and sample images every SAVE_PER_TIMES generator iterations
            if (g_iter) % SAVE_PER_TIMES == 0:
                self.save_model()
                fake_images = self.G(self.frozen_latent)
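                # The same frozen latents and frozen real batch are reused at every checkpoint,
                # so the FID values written below are comparable across iterations.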

                # Denormalize images and save them in grid 8x8
                z = self.get_torch_variable(torch.randn(self.batch_size, 100, 1, 1))
                samples = self.G(z)
                samples = samples.mul(0.5).add(0.5)
                samples = samples.data.cpu()[:64]
                grid = utils.make_grid(samples)
                utils.save_image(grid, '{}/img_generator_iter_{}.png'.format(self.save_path, str(g_iter).zfill(3)))

                # Testing
                time = t.time() - self.t_begin
                # FID between images generated from the frozen latents and the frozen real batch
                fid_value = calculate_fid_given_images(fake_images, self.frozen_images)
                f = open('{}/fid.txt'.format(self.save_path), 'a')
                f.write('Iter:{} Fid:{}\n'.format(str(g_iter), str(fid_value)))
                f.close()
                print("Generator iter: {}, Time: {}, FID: {}".format(g_iter, time, fid_value))

                # ============ TensorBoard logging ============#
                # (1) Log the scalar values
                info = {
                    # 'Wasserstein distance': Wasserstein_D.data,
                    'Loss D': d_loss.data,
                    'Loss G': g_loss.data,
                    'Loss D Real': d_loss_real.data,
                    'Loss D Fake': d_loss_fake.data

                }

                for tag, value in info.items():
                    self.logger.scalar_summary(tag, value, g_iter + 1)

                # (2) Log the images
                info = {
                    'real_images': self.real_images(images, self.number_of_images),
                    'generated_images': self.generate_img(z, self.number_of_images)
                }

                for tag, images in info.items():
                    self.logger.image_summary(tag, images, g_iter + 1)



        self.t_end = t.time()
        print('Time of training: {}'.format(self.t_end - self.t_begin))

        # Save the trained parameters
        self.save_model()

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        z = self.get_torch_variable(torch.randn(self.batch_size, 100, 1, 1))
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
        utils.save_image(grid, 'dgan_model_image.png')


    def real_images(self, images, number_of_images):
        if (self.C == 3):
            return self.to_np(images.view(-1, self.C, 32, 32)[:self.number_of_images])
        else:
            return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            if self.C == 3:
                generated_images.append(sample.reshape(self.C, 32, 32))
            else:
                generated_images.append(sample.reshape(32, 32))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self):
        torch.save(self.G.state_dict(), '{}/generator.pkl'.format(self.save_path))
        torch.save(self.D.state_dict(), '{}/discriminator.pkl'.format(self.save_path))
        print('Models saved to {0}/generator.pkl & {0}/discriminator.pkl'.format(self.save_path))

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))

    def get_infinite_batches(self, data_loader):
        while True:
            for i, (images, _) in enumerate(data_loader):
                yield images

    def generate_latent_walk(self, number):
        if not os.path.exists('interpolated_images/'):
            os.makedirs('interpolated_images/')

        number_int = 10
        # Interpolate between two noise vectors (z1, z2)
        z_intp = torch.FloatTensor(1, 100, 1, 1)
        z1 = torch.randn(1, 100, 1, 1)
        z2 = torch.randn(1, 100, 1, 1)
        if self.cuda:
            z_intp = z_intp.cuda()
            z1 = z1.cuda()
            z2 = z2.cuda()

        z_intp = Variable(z_intp)
        images = []
        step = 1.0 / float(number_int + 1)
        alpha = step
        for i in range(1, number_int + 1):
            # Linear interpolation between z1 and z2
            z_intp.data = z1 * alpha + z2 * (1.0 - alpha)
            alpha += step
            fake_im = self.G(z_intp)
            fake_im = fake_im.mul(0.5).add(0.5) #denormalize
            images.append(fake_im.view(self.C,32,32).data.cpu())

        grid = utils.make_grid(images, nrow=number_int )
        utils.save_image(grid, 'interpolated_images/interpolated_{}.png'.format(str(number).zfill(3)))
        print("Saved interpolated images.")
Example #12
        if os.path.isfile(args.checkpoint):

            print("=> loading checkpoint '{}'".format(args.checkpoint))
            checkpoint = torch.load(args.checkpoint)
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.checkpoint))
        else:
            print("=> no checkpoint found at '{}'".format(args.checkpoint))

    checkpoint_saver = nn_module_utils.CheckPointSaver([
        'epoch_loss', 'epoch_accuracy', 'batch_loss', 'batch_accuracy',
        'test_loss'
    ], checkpoint_dir)

    logger = Logger('./logs')
    os.makedirs('./results', exist_ok=True)

    def train(epoch):
        model.train()
        train_loss = 0
        train_reconstruction_loss = 0
        train_kld_loss = 0
        step = (epoch - 1) * len(train_loader.dataset) + 1
        for batch_idx, (input, ground_truth, _) in enumerate(train_loader):
            input = Variable(input)
            ground_truth = Variable(ground_truth)
            if args.cuda:
                input = input.cuda()
                ground_truth = ground_truth.cuda()
Example #13
class DCGAN_MODEL(object):
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels
        self.mode = args.mode

        self.name = ('res/_mode_' + str(args.mode) + '_beta_g_' +
                     str(args.beta_g) +
                     '_beta_d_' + str(args.beta_d) + '_lr_g_' +
                     str(args.lr_g) + '_lr_d_' + str(args.lr_d) +
                     '_alpha_d_vjp_' + str(args.alpha_d_vjp) +
                     '_alpha_g_vjp_' + str(args.alpha_g_vjp) +
                     '_alpha_d_grad_' + str(args.alpha_d_grad) +
                     '_alpha_g_grad_' + str(args.alpha_g_grad))
        print(self.name)
        if not os.path.exists(self.name):
            os.makedirs(self.name)
        # binary cross entropy loss and optimizer
        self.loss = nn.BCEWithLogitsLoss()

        self.cuda = "False"
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Use a lower learning rate than suggested by the Adam authors (lr=0.0002), and beta_1 = 0.5 instead of 0.9, which works better [Radford2015]
        if self.mode == 'adam':
            self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
            self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
        elif self.mode == 'adam_vjp':
            self.d_optimizer = optim.VJP_Adam(self.D.parameters(),
                                              lr=args.lr_d,
                                              betas=(args.beta_d, 0.999),
                                              alpha_vjp=args.alpha_d_vjp,
                                              alpha_grad=args.alpha_d_grad)
            self.g_optimizer = optim.VJP_Adam(self.G.parameters(),
                                              lr=args.lr_g,
                                              betas=(args.beta_g, 0.999),
                                              alpha_vjp=args.alpha_g_vjp,
                                              alpha_grad=args.alpha_g_grad)
        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10

    # cuda support
    def check_cuda(self, cuda_flag=False):
        if cuda_flag:
            self.cuda = True
            self.D.cuda(self.cuda_index)
            self.G.cuda(self.cuda_index)
            self.loss = nn.BCEWithLogitsLoss().cuda(self.cuda_index)
            print("Cuda enabled flag: ")
            print(self.cuda)

    def train(self, train_loader):
        self.t_begin = t.time()
        generator_iter = 0
        self.file = open("inception_score_graph.txt", "w")
        dis_params_flatten = parameters_to_vector(self.D.parameters())
        gen_params_flatten = parameters_to_vector(self.G.parameters())

        # just to fill the empty grad buffers
        if self.cuda:
            z = Variable(torch.randn(self.batch_size, 100, 1,
                                     1)).cuda(self.cuda_index)
        else:
            z = Variable(torch.randn(self.batch_size, 100, 1, 1))
        fake_images = self.G(z)
        outputs = self.D(fake_images)
        fake_labels = Variable(torch.zeros(self.batch_size))
        if self.cuda:
            fake_labels = fake_labels.cuda(self.cuda_index)
        d_loss_fake = self.loss(outputs.squeeze(), fake_labels)
        (0.0 * d_loss_fake).backward(create_graph=True)
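        # The dummy backward above (loss scaled by 0.0, create_graph=True) only populates every
        # parameter's .grad buffer; the VJP optimizer path below presumably needs non-empty buffers
        # the first time it reads the opponent's gradients via optim.parameters_grad_to_vector.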
        d_loss_fake = 0.0
        best_inception_score = 0.0
        d_loss_list = []
        g_loss_list = []
        for epoch in range(self.epochs):
            self.epoch_start_time = t.time()

            for i, (images, _) in enumerate(train_loader):
                # Stop before the last, incomplete batch
                if i == train_loader.dataset.__len__() // self.batch_size:
                    break

                z = torch.rand((self.batch_size, 100, 1, 1))
                real_labels = torch.ones(self.batch_size)
                fake_labels = torch.zeros(self.batch_size)

                if self.cuda:
                    images, z = Variable(images).cuda(
                        self.cuda_index), Variable(z).cuda(self.cuda_index)
                    real_labels, fake_labels = Variable(real_labels).cuda(
                        self.cuda_index), Variable(fake_labels).cuda(
                            self.cuda_index)
                else:
                    images, z = Variable(images), Variable(z)
                    real_labels, fake_labels = Variable(real_labels), Variable(
                        fake_labels)

                # Train discriminator
                # Compute BCE_Loss using real images
                outputs = self.D(images)
                d_loss_real = self.loss(outputs.squeeze(), real_labels)
                real_score = outputs

                # Compute BCE Loss using fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1,
                                             1)).cuda(self.cuda_index)
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                outputs = self.D(fake_images)
                d_loss_fake = self.loss(outputs.squeeze(), fake_labels)
                fake_score = outputs

                # Optimize discriminator
                d_loss = d_loss_real + d_loss_fake
                if self.mode == 'adam':
                    self.D.zero_grad()
                    d_loss.backward()
                    self.d_optimizer.step()
                elif self.mode == 'adam_vjp':
                    gradsD = torch.autograd.grad(outputs=d_loss,
                                                 inputs=(self.D.parameters()),
                                                 create_graph=True)
                    for p, g in zip(self.D.parameters(), gradsD):
                        p.grad = g
                    gen_params_flatten_prev = gen_params_flatten + 0.0
                    gen_params_flatten = parameters_to_vector(
                        self.G.parameters()) + 0.0
                    grad_gen_params_flatten = optim.parameters_grad_to_vector(
                        self.G.parameters())
                    delta_gen_params_flatten = gen_params_flatten - gen_params_flatten_prev
                    vjp_dis = torch.autograd.grad(
                        grad_gen_params_flatten,
                        self.D.parameters(),
                        grad_outputs=delta_gen_params_flatten)
                    self.d_optimizer.step(vjps=vjp_dis)
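                    # vjp_dis is the vector-Jacobian product of the generator's stored gradient with the
                    # generator's most recent parameter change, differentiated w.r.t. the discriminator's
                    # parameters; VJP_Adam presumably uses it to correct the discriminator step for the
                    # opponent's latest move (a competitive-gradient-style update).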

                # Train generator
                # Compute loss with fake images
                if self.cuda:
                    z = Variable(torch.randn(self.batch_size, 100, 1,
                                             1)).cuda(self.cuda_index)
                else:
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                fake_images = self.G(z)
                outputs = self.D(fake_images)
                # Non-zero-sum (non-saturating) generator loss:
                g_loss = self.loss(outputs.squeeze(), real_labels)
                # Zero-sum alternative:
                # g_loss = - self.loss(outputs.squeeze(), fake_labels)
                # Optimize generator
                if self.mode == 'adam':
                    self.D.zero_grad()
                    self.G.zero_grad()
                    g_loss.backward()
                    self.g_optimizer.step()
                elif self.mode == 'adam_vjp':
                    gradsG = torch.autograd.grad(outputs=g_loss,
                                                 inputs=(self.G.parameters()),
                                                 create_graph=True)
                    for p, g in zip(self.G.parameters(), gradsG):
                        p.grad = g

                    dis_params_flatten_prev = dis_params_flatten + 0.0
                    dis_params_flatten = parameters_to_vector(
                        self.D.parameters()) + 0.0
                    grad_dis_params_flatten = optim.parameters_grad_to_vector(
                        self.D.parameters())
                    delta_dis_params_flatten = dis_params_flatten - dis_params_flatten_prev
                    vjp_gen = torch.autograd.grad(
                        grad_dis_params_flatten,
                        self.G.parameters(),
                        grad_outputs=delta_dis_params_flatten)
                    self.g_optimizer.step(vjps=vjp_gen)

                generator_iter += 1

                if generator_iter % 1000 == 0:
                    # Workaround: the GPU cannot hold much more than ~800 generated examples at once,
                    # so we generate 10 chunks of 800 samples and stack them into one list of 8000 images.
                    # The Inception Score is more reliable this way, since the samples cover more Inception classes.
                    sample_list = []
                    for i in range(10):
                        z = Variable(torch.randn(800, 100, 1,
                                                 1)).cuda(self.cuda_index)
                        samples = self.G(z)
                        sample_list.append(samples.data.cpu().numpy())

                    # Flattening list of lists into one list of numpy arrays
                    new_sample_list = list(chain.from_iterable(sample_list))
                    print(
                        "Calculating Inception Score over 8k generated images")
                    # Feeding list of numpy arrays
                    inception_score = get_inception_score(new_sample_list,
                                                          cuda=True,
                                                          batch_size=32,
                                                          resize=True,
                                                          splits=10)
                    print('Epoch-{}'.format(epoch + 1))
                    print(inception_score)
                    if inception_score[0] >= best_inception_score:
                        best_inception_score = inception_score[0]
                        self.save_model()

                    # Denormalize images and save them in grid 8x8
                    z = Variable(torch.randn(800, 100, 1,
                                             1)).cuda(self.cuda_index)
                    samples = self.G(z)
                    samples = samples.mul(0.5).add(0.5)
                    samples = samples.data.cpu()[:64]
                    grid = utils.make_grid(samples)
                    utils.save_image(
                        grid, self.name + '/iter_{}_inception_{}_.png'.format(
                            str(generator_iter).zfill(3),
                            str(inception_score[0])))

                    time = t.time() - self.t_begin
                    print("Inception score: {}".format(inception_score))
                    print("Generator iter: {}".format(generator_iter))
                    print("Time {}".format(time))

                    # Write to file inception_score, gen_iters, time
                    output = str(generator_iter) + " " + str(time) + " " + str(
                        inception_score[0]) + "\n"
                    self.file.write(output)

                if ((i + 1) % 100) == 0:
                    d_loss_list += [d_loss.item()]
                    g_loss_list += [g_loss.item()]
                    print("Epoch: [%2d] [%4d/%4d] D_loss: %.8f, G_loss: %.8f" %
                          ((epoch + 1),
                           (i + 1), train_loader.dataset.__len__() //
                           self.batch_size, d_loss.item(), g_loss.item()))

                    z = Variable(
                        torch.randn(self.batch_size, 100, 1,
                                    1).cuda(self.cuda_index))

                    # TensorBoard logging
                    # Log the scalar values
                    info = {'d_loss': d_loss.item(), 'g_loss': g_loss.item()}

                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, generator_iter)

                    # Log values and gradients of the parameters
                    for tag, value in self.D.named_parameters():
                        tag = tag.replace('.', '/')
                        self.logger.histo_summary(tag, self.to_np(value),
                                                  generator_iter)
                        self.logger.histo_summary(tag + '/grad',
                                                  self.to_np(value.grad),
                                                  generator_iter)

                    # Log the images while training
                    info = {
                        'real_images':
                        self.real_images(images, self.number_of_images),
                        'generated_images':
                        self.generate_img(z, self.number_of_images)
                    }

                    for tag, images in info.items():
                        self.logger.image_summary(tag, images, generator_iter)

        self.t_end = t.time()
        print('Time of training: {}'.format(self.t_end - self.t_begin))

        # Save the trained parameters
        self.save_final_model()
        np.save(self.name + '/d_loss', np.array(d_loss_list))
        np.save(self.name + '/g_loss', np.array(g_loss_list))
        self.evaluate(train_loader, self.name + '/discriminator.pkl',
                      self.name + '/generator.pkl')

    def evaluate(self, test_loader, D_model_path, G_model_path):
        self.load_model(D_model_path, G_model_path)
        self.G.eval()
        all_fake = []
        for i in range(10):
            z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
            fake = self.G(z)
            all_fake += [fake]
        all_fake = torch.cat(all_fake, 0)
        inception_score = calc_inception_score(
            (all_fake.cpu().data.numpy() + 1.0) * 128)
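        # (x + 1.0) * 128 rescales the Tanh output from [-1, 1] to roughly the [0, 255] pixel range,
        # which is presumably what calc_inception_score expects.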
        print(inception_score)
        z = Variable(torch.randn(self.batch_size, 100, 1,
                                 1)).cuda(self.cuda_index)
        samples = self.G(z)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.data.cpu()
        grid = utils.make_grid(samples)
        print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
        utils.save_image(
            grid, self.name + '/best_inception_score' + str(inception_score) +
            '.png')

    def real_images(self, images, number_of_images):
        if (self.C == 3):
            return self.to_np(
                images.view(-1, self.C, 32, 32)[:self.number_of_images])
        else:
            return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])

    def generate_img(self, z, number_of_images):
        samples = self.G(z).data.cpu().numpy()[:number_of_images]
        generated_images = []
        for sample in samples:
            if self.C == 3:
                generated_images.append(sample.reshape(self.C, 32, 32))
            else:
                generated_images.append(sample.reshape(32, 32))
        return generated_images

    def to_np(self, x):
        return x.data.cpu().numpy()

    def save_model(self):
        torch.save(self.G.state_dict(), self.name + '/generator.pkl')
        torch.save(self.D.state_dict(), self.name + '/discriminator.pkl')
        print('Models saved to {0}/generator.pkl & {0}/discriminator.pkl'.format(self.name))

    def save_final_model(self):
        torch.save(self.G.state_dict(), self.name + '/final_generator.pkl')
        torch.save(self.D.state_dict(), self.name + '/final_discriminator.pkl')
        print('Final models saved to {0}/final_generator.pkl & {0}/final_discriminator.pkl'.format(self.name))

    def load_model(self, D_model_filename, G_model_filename):
        D_model_path = os.path.join(os.getcwd(), D_model_filename)
        G_model_path = os.path.join(os.getcwd(), G_model_filename)
        self.D.load_state_dict(torch.load(D_model_path))
        self.G.load_state_dict(torch.load(G_model_path))
        print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))

    def generate_latent_walk(self, number):
        if not os.path.exists('interpolated_images/'):
            os.makedirs('interpolated_images/')

        # Interpolate between two noise vectors (z1, z2) with number_int steps in between
        number_int = 10
        z_intp = torch.FloatTensor(1, 100, 1, 1)
        z1 = torch.randn(1, 100, 1, 1)
        z2 = torch.randn(1, 100, 1, 1)
        if self.cuda:
            z_intp = z_intp.cuda()
            z1 = z1.cuda()
            z2 = z2.cuda()

        z_intp = Variable(z_intp)
        images = []
        step = 1.0 / float(number_int + 1)
        alpha = step
        for i in range(1, number_int + 1):
            # Linear interpolation between z1 and z2
            z_intp.data = z1 * alpha + z2 * (1.0 - alpha)
            alpha += step
            fake_im = self.G(z_intp)
            fake_im = fake_im.mul(0.5).add(0.5)  # denormalize
            images.append(fake_im.view(self.C, 32, 32).data.cpu())

        grid = utils.make_grid(images, nrow=number_int)
        utils.save_image(
            grid, 'interpolated_images/interpolated_{}.png'.format(
                str(number).zfill(3)))
        print(
            "Saved interpolated images to interpolated_images/interpolated_{}."
            .format(str(number).zfill(3)))
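
For reference, a minimal, hypothetical way to drive this class (assuming Generator, Discriminator, Logger and the optimizer module imported by the example are available on the path; the values below are illustrative only and fill in just the fields that DCGAN_MODEL actually reads):

from argparse import Namespace

args = Namespace(channels=3, mode='adam', cuda=True,
                 epochs=25, batch_size=64,
                 lr_g=2e-4, lr_d=2e-4, beta_g=0.5, beta_d=0.5,
                 alpha_g_vjp=1.0, alpha_d_vjp=1.0,
                 alpha_g_grad=1.0, alpha_d_grad=1.0)

model = DCGAN_MODEL(args)
# train_loader is assumed to be a torchvision-style DataLoader of 32x32 images
# model.train(train_loader)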