Example #2
    def create_model_restore_weight(self, checkpoint_dir):
        # create model
        self.generator = Generator()
        self.discriminator = Discriminator()

        # restore model weights
        generator_optimizer = tf.keras.optimizers.Adam(1e-4)
        discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

        checkpoint = tf.train.Checkpoint(
            generator_optimizer=generator_optimizer,
            discriminator_optimizer=discriminator_optimizer,
            generator=self.generator,
            discriminator=self.discriminator)

        ckpt_manager = tf.train.CheckpointManager(checkpoint,
                                                  checkpoint_dir,
                                                  max_to_keep=5)

        # if a checkpoint exists, restore the latest checkpoint.
        if ckpt_manager.latest_checkpoint:
            checkpoint.restore(ckpt_manager.latest_checkpoint)
            print('Latest checkpoint restored!')
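
The method only restores; the matching save call on the same manager during training would look like the following sketch (the save interval here is an arbitrary assumption, not taken from this repo):

        # Hypothetical training-loop counterpart: CheckpointManager.save()
        # writes <checkpoint_dir>/ckpt-<n> and prunes old files beyond max_to_keep=5.
        if (epoch + 1) % 5 == 0:
            ckpt_manager.save()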
Example #3
def get_model(opt, cuda):
    # Loss function
    adversarial_loss = torch.nn.BCELoss()

    # Initialize generator and discriminator
    generator = Generator(opt)
    discriminator = Discriminator(opt)

    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()

    # Initialize weights
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)

    return generator, discriminator, adversarial_loss
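
A minimal usage sketch for get_model; the opt fields below are assumptions modeled on common DCGAN argparse configs, not this repo's actual attribute names (Generator, Discriminator, and weights_init_normal come from the surrounding module):

from types import SimpleNamespace

import torch

# Hypothetical config object standing in for parsed argparse options.
opt = SimpleNamespace(latent_dim=100, img_size=64, channels=3)
generator, discriminator, adversarial_loss = get_model(opt, cuda=torch.cuda.is_available())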
Example #4
class Inpaint:
    def __init__(self):
        # Initialize the DCGAN model and optimizers
        params = {
            'bsize': 128,    # Batch size during DCGAN training.
            'imsize': 64,    # Spatial size of training images; all images are resized to this during preprocessing.
            'nc': 3,         # Number of channels in the training images (3 for colour images).
            'nz': 100,       # Size of the z latent vector (the input to the generator).
            'ngf': 64,       # Base size of feature maps in the generator; layer depths are multiples of this.
            'ndf': 64,       # Base size of feature maps in the discriminator; layer depths are multiples of this.
            'nepochs': 10,   # Number of training epochs.
            'lr': 0.0002,    # Learning rate for the optimizers.
            'beta1': 0.5,    # Beta1 hyperparameter for the Adam optimizers.
            'save_epoch': 2  # Save a checkpoint every this many epochs.
        }
        self.netG = Generator(params).to(device)
        self.netD = Discriminator(params).to(device)
        filename = "./checkpoint/saved_model.pth"
        # filename = "pretrained_model.pth"
        if os.path.isfile(filename):
            saved_model = torch.load(filename,
                                     map_location=torch.device(device))
            self.netG.load_state_dict(saved_model['G_state_dict'])
            self.netD.load_state_dict(saved_model['D_state_dict'])
        else:
            print("Trained DCGAN not found!")
            sys.exit()

        self.batch_size = 64  # Batch size for inpainting
        self.image_size = params['imsize']  # 64
        self.num_channels = params['nc']  # 3
        self.z_dim = params['nz']  # 100
        self.nIters = 3000  # Inpainting Iterations
        self.blending_steps = 100
        self.lamda = 0.2
        self.momentum = 0.9
        self.lr = 0.0003

    def image_gradient(self, image):
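        # Sobel kernels: `a` responds to horizontal intensity changes (G_x),
        # `b` to vertical ones (G_y). After repeat_interleave each weight is
        # (1, 3, 3, 3), i.e. one output channel over three input channels, so
        # the per-channel RGB gradients are summed into a single map.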
        a = torch.Tensor([[[[1, 0, -1], [2, 0, -2], [1, 0, -1]]]]).to(device)
        a = torch.repeat_interleave(a, 3, dim=1)
        G_x = F.conv2d(image, a, padding=1)
        b = torch.Tensor([[[[1, 2, 1], [0, 0, 0], [-1, -2, -1]]]]).to(device)
        b = torch.repeat_interleave(b, 3, dim=1)
        G_y = F.conv2d(image, b, padding=1)
        return G_x, G_y

    def poisson_blending(self, corrupted_images, generated_images, masks):
        print("Starting Poisson blending ...")
        initial_guess = masks * corrupted_images + (1 - masks) * generated_images
        image_optimum = nn.Parameter(
            torch.FloatTensor(initial_guess.detach().cpu().numpy()).to(device))
        optimizer_blending = optim.Adam([image_optimum])
        generated_grad_x, generated_grad_y = self.image_gradient(
            generated_images)

        for epoch in range(self.blending_steps):
            optimizer_blending.zero_grad()
            image_optimum_grad_x, image_optimum_grad_y = self.image_gradient(
                image_optimum)
            blending_loss = torch.sum(
                ((generated_grad_x - image_optimum_grad_x)**2 +
                 (generated_grad_y - image_optimum_grad_y)**2) * (1 - masks))
            blending_loss.backward()
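            # Keep the known pixels fixed: zero the gradient wherever the mask
            # is 1 (original image), so only the hole region is optimized.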
            image_optimum.grad = image_optimum.grad * (1 - masks)
            optimizer_blending.step()

            print("[Epoch: {}/{}] \t[Blending loss: {:.3f}]   \r".format(
                1 + epoch, self.blending_steps, blending_loss),
                  end="")
        print("")

        del optimizer_blending
        return image_optimum.detach()

    def get_imp_weighting(self, masks, nsize):
        # TODO: Implement eq 3
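        # Eq. 3 of Yeh et al., "Semantic Image Inpainting with Deep Generative
        # Models": a known pixel's importance is the mean of (1 - M) over its
        # nsize x nsize window (the fraction of missing neighbours). Note the
        # convolution below runs over M itself rather than (1 - M).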
        kernel = torch.ones((1, 1, nsize, nsize)).to(device)
        kernel = kernel / torch.sum(kernel)  # nsize x nsize mean filter
        # Pad the borders with 1 (known pixels); the explicit pad of 2 plus the
        # conv padding of 1 gives the (nsize - 1) / 2 = 3 total padding needed
        # for the hardcoded nsize = 7 window.
        padded_masks = F.pad(masks, (2, 2, 2, 2), "constant", 1)
        conv = F.conv2d(input=padded_masks, weight=kernel, padding=1)
        weighted_masks = masks * conv

        return weighted_masks

    def run_dcgan(self, z_i):
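        # `real_label` and `criterion` (presumably nn.BCELoss) are module-level
        # globals. errG is the BCE between D(G(z)) and the "real" label, i.e.
        # the perceptual-loss term: small when D finds G(z) realistic.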
        G_z_i = self.netG(z_i)
        label = torch.full((z_i.shape[0], ),
                           real_label,
                           dtype=torch.float,
                           device=device)
        D_G_z_i = torch.squeeze(self.netD(G_z_i))
        errG = criterion(D_G_z_i, label)

        return G_z_i, errG

    def get_context_loss(self, G_z_i, images, masks):
        # Calculate context loss
        # Implement eq 4
        nsize = 7
        W = self.get_imp_weighting(masks, nsize)
        # Weighted L1 distance between the generated and corrupted images
        # (eq. 4); torch.sum reduces it to a single scalar.
        context_loss = torch.sum(torch.abs(torch.mul(W, G_z_i - images)))

        return context_loss

    def generate_z_hat(self, real_images, images, masks):
        # Optimize the latent vector z with a manual Nesterov-momentum gradient
        # descent, following
        # https://github.com/moodoki/semantic_image_inpainting/blob/extensions/src/model.py#L182
        z = torch.randn(images.shape[0], self.z_dim, 1, 1, device=device)
        v = 0
        for i in range(self.nIters):
            z.requires_grad = True
            G_z_i, errG = self.run_dcgan(z)
            perceptual_loss = errG
            context_loss = self.get_context_loss(G_z_i, images, masks)
            loss = context_loss + (self.lamda * perceptual_loss)
            grad = torch.autograd.grad(loss, z)

            # Nesterov momentum update, then clamp z to the generator's
            # training range [-1, 1].
            v_prev = v
            v = self.momentum * v - self.lr * grad[0]
            with torch.no_grad():
                z += (-self.momentum * v_prev + (1 + self.momentum) * v)
                z = torch.clamp(z, -1, 1)

            if i % 100 == 0:
                print("[Iter {}/{}] loss: {:.3f}".format(i, self.nIters, loss.item()))
            if i % 250 == 0:
                with torch.no_grad():
                    # Expand the single-channel masks to match the 3-channel images.
                    channeled_masks = torch.repeat_interleave(masks, 3, dim=1)
                    merged_images = channeled_masks * images + (
                        1 - channeled_masks) * G_z_i
                    plt.figure(figsize=(8, 8))
                    plt.subplot(2, 1, 1)
                    plt.axis("off")
                    plt.title("Real Images")
                    plt.imshow(
                        np.transpose(
                            vutils.make_grid(real_images.to(device)[:bsize],
                                             padding=5,
                                             normalize=True).cpu(), (1, 2, 0)))

                    plt.subplot(2, 1, 2)
                    plt.axis("off")
                    plt.title("Generated Images")
                    plt.imshow(
                        np.transpose(
                            vutils.make_grid(merged_images.to(device)[:bsize],
                                             padding=5,
                                             normalize=True).cpu(), (1, 2, 0)))
                    plt.savefig("iter_{}.png".format(i))
                    # plt.show()

        return z

    def main(self, dataloader):
        for i, data in enumerate(dataloader, 0):
            print(i)
            if i > 0:
                break
            real_images = data[0].to(device)
            corrupt_images = data[1].to(device)
            masks = (data[2] / 255).to(device)
            masks.unsqueeze_(1)
            # Get optimal latent space vectors (Z^) for corrupt images
            z_hat = self.generate_z_hat(real_images, corrupt_images, masks)
            with torch.no_grad():
                G_z_hat, _ = self.run_dcgan(z_hat)
                # Expand the single-channel masks to match the 3-channel images.
                channeled_masks = torch.repeat_interleave(masks, 3, dim=1)
                merged_images = channeled_masks * corrupt_images + (
                    1 - channeled_masks) * G_z_hat
            # blended_images = self.poisson_blending(corrupt_images, G_z_hat.detach(), channeled_masks)
            plt.figure(figsize=(8, 8))
            plt.subplot(3, 1, 1)
            plt.axis("off")
            plt.title("Real Images")
            plt.imshow(
                np.transpose(
                    vutils.make_grid(real_images.to(device)[:bsize],
                                     padding=5,
                                     normalize=True).cpu(), (1, 2, 0)))

            plt.subplot(3, 1, 2)
            plt.axis("off")
            plt.title("Corrupt Images")
            plt.imshow(
                np.transpose(
                    vutils.make_grid(corrupt_images.to(device)[:bsize],
                                     padding=5,
                                     normalize=True).cpu(), (1, 2, 0)))
            plt.savefig("final.png")

            plt.subplot(3, 1, 3)
            plt.axis("off")
            plt.title("Generated Images")
            plt.imshow(
                np.transpose(
                    vutils.make_grid(merged_images.to(device)[:bsize],
                                     padding=5,
                                     normalize=True).cpu(), (1, 2, 0)))
            plt.savefig("final.png")
Example #5
        print("=> loaded checkpoint '{}' (epoch {})"
                  .format(filename, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(filename))

    return G_model, D_model, G_optimizer, D_optimizer, start_epoch, params

# Create the generator
netG = Generator(params).to(device)
# Apply the weights_init function to randomly initialize all weights to mean=0, stdev=0.02.
netG.apply(weights_init)
# Print the model
print(netG)

# Create the Discriminator
netD = Discriminator(params).to(device)
# Apply the weights_init function to randomly initialize all weights to mean=0, stdev=0.02.
netD.apply(weights_init)
# Print the model
print(netD)

# Create batch of latent vectors that we will use to visualize the progression of the generator
fixed_noise = torch.randn(64, params['nz'], 1, 1, device=device)

optimizerD = optim.Adam(netD.parameters(), lr=params['lr'], betas=(params['beta1'], 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=params['lr'], betas=(params['beta1'], 0.999))

# Load model if available
netG, netD, optimizerG, optimizerD, start_epoch, params = load_checkpoint(netG, netD, optimizerG, optimizerD, params, checkpoint_filename)
netG = netG.to(device)
netD = netD.to(device)
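
Only the tail of load_checkpoint is shown above; a sketch of the full function consistent with that tail (the checkpoint field names are assumptions modeled on common PyTorch conventions):

def load_checkpoint(G_model, D_model, G_optimizer, D_optimizer, params, filename):
    start_epoch = 0
    if os.path.isfile(filename):
        checkpoint = torch.load(filename, map_location=torch.device(device))
        start_epoch = checkpoint['epoch']
        G_model.load_state_dict(checkpoint['G_state_dict'])
        D_model.load_state_dict(checkpoint['D_state_dict'])
        G_optimizer.load_state_dict(checkpoint['G_optimizer'])
        D_optimizer.load_state_dict(checkpoint['D_optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(filename, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(filename))

    return G_model, D_model, G_optimizer, D_optimizer, start_epoch, params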
Example #6
def main():
    """
    function to train model, plot generated samples, compute training score,
    save train model, load train model, and evaluate model
    """
    # device = torch.device('cuda:0')
    device = torch.device('cpu')

    skip_training = False
    batch_size = 100
    n_epochs = 20

    scorer = Scorer()
    scorer.to(device)

    nz = 10
    netG = Generator(nz=nz, ngf=64, nc=1)
    netD = Discriminator(nc=1, ndf=64)

    netD = netD.to(device)
    netG = netG.to(device)

    if not skip_training:
        d_optimizer = torch.optim.Adam(netD.parameters(),
                                       lr=0.0002,
                                       betas=(0.5, 0.999))
        g_optimizer = torch.optim.Adam(netG.parameters(),
                                       lr=0.0002,
                                       betas=(0.5, 0.999))

        for epoch in range(n_epochs):
            for i, data in enumerate(trainloader, 0):
                images, _ = data
                images = images.to(device)

                netD.train()
                d_optimizer.zero_grad()
                noise = torch.randn(images.size(0), nz, 1, 1, device=device)
                fake_images = netG(noise)
                # Detach the fakes for the discriminator step so its backward
                # pass does not reach into the generator graph.
                d_loss_real, D_real, d_loss_fake, D_fake = discriminator_loss(
                    netD, images, fake_images.detach())
                d_loss = d_loss_real + d_loss_fake
                d_loss.backward()
                d_optimizer.step()

                netG.train()
                g_optimizer.zero_grad()
                g_loss = generator_loss(netD, fake_images)
                g_loss.backward()
                g_optimizer.step()

            with torch.no_grad():
                # Plot generated images
                z = torch.randn(144, nz, 1, 1, device=device)
                samples = netG(z)
                tools.plot_generated_samples(samples)

                # Compute score
                z = torch.randn(1000, nz, 1, 1, device=device)
                samples = netG(z)
                samples = (samples + 1) / 2  # Re-normalize to [0, 1]
                score = scorer(samples)

            print('Train Epoch {}: D_real {}, D_fake {}, score {}'.format(
                epoch + 1, D_real, D_fake, score))

        tools.save_model(netG, '11_dcgan_g.pth')
        tools.save_model(netD, '11_dcgan_d.pth')
    else:
        nz = 10
        netG = Generator(nz=nz, ngf=64, nc=1)
        netD = Discriminator(nc=1, ndf=64)

        tools.load_model(netG, '11_dcgan_g.pth', device)
        tools.load_model(netD, '11_dcgan_d.pth', device)

        with torch.no_grad():
            z = torch.randn(1000, nz, 1, 1, device=device)
            samples = (netG(z) + 1) / 2
            score = scorer(samples)

        print(f'The trained DCGAN achieves a score of {score:.5f}')
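
main() calls discriminator_loss and generator_loss, which are defined elsewhere in the module. A sketch consistent with the call sites above (the return shapes are inferred, and it assumes netD ends in a sigmoid):

import torch
import torch.nn.functional as F

def discriminator_loss(netD, real_images, fake_images):
    # The caller detaches fake_images for the discriminator step.
    real_out = netD(real_images).view(-1)
    fake_out = netD(fake_images).view(-1)
    d_loss_real = F.binary_cross_entropy(real_out, torch.ones_like(real_out))
    d_loss_fake = F.binary_cross_entropy(fake_out, torch.zeros_like(fake_out))
    # Mean outputs are returned for logging (D_real / D_fake in the loop).
    return d_loss_real, real_out.mean().item(), d_loss_fake, fake_out.mean().item()

def generator_loss(netD, fake_images):
    out = netD(fake_images).view(-1)
    return F.binary_cross_entropy(out, torch.ones_like(out))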
Example #7
def train_dcgan_main(data_dir, BATCH_SIZE, EPOCHS, noise_dim, num_examples_to_generate, checkpoint_dir,
                     store_produce_image_dir):
    # Notice the use of `tf.function`
    # This annotation causes the function to be "compiled".
    @tf.function
    def train_step(images):
        noise = tf.random.normal([BATCH_SIZE, noise_dim])

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_images = generator(noise, training=True)

            real_output = discriminator(images, training=True)
            fake_output = discriminator(generated_images, training=True)

            gen_loss = generator_loss(fake_output)
            disc_loss = discriminator_loss(real_output, fake_output)

        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        return gen_loss, disc_loss

    def train(dataset, start_epoch, epochs):
        for epoch in range(start_epoch, epochs):
            start = time.time()

            for batch_idx, image_batch in enumerate(dataset):
                gen_loss, disc_loss = train_step(image_batch)
                if (batch_idx + 1) % 500 == 0:
                    print('Epoch {} Batch {} Generator Loss {:.4f}\t Discriminator Loss {:.4f}'.format(
                        epoch + 1, batch_idx + 1, gen_loss.numpy(), disc_loss.numpy()))
            # Produce images for the GIF as we go
            # display.clear_output(wait=True)
            generate_and_save_images(generator, epoch, seed, store_produce_image_dir)

            # Save the model every 3 epochs
            if (epoch + 1) % 3 == 0:
                checkpoint.save(file_prefix=checkpoint_prefix)

            print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

        # Generate after the final epoch
        # display.clear_output(wait=True)
        generate_and_save_images(generator, epochs, seed, store_produce_image_dir)

    # prepare data
    train_dataset = get_celebface_dataset(data_dir, new_height=218, new_width=178,
                                          BATCH_SIZE=BATCH_SIZE, BUFFER_SIZE=100000)

    # create model
    generator = Generator()
    discriminator = Discriminator()

    generator_optimizer = tf.keras.optimizers.Adam(1e-4)
    discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                     discriminator_optimizer=discriminator_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)

    ckpt_manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=10)

    start_epoch = 0
    # if a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
        checkpoint.restore(ckpt_manager.latest_checkpoint)
        print(f'Latest checkpoint restored! start_epoch is {start_epoch}')

    # We will reuse this seed over time (so it's easier
    # to visualize progress in the animated GIF)
    seed = tf.random.normal([num_examples_to_generate, noise_dim])

    # train model
    train(train_dataset, start_epoch, EPOCHS)

    # produce images to gif file
    images_to_gif(anim_file='dcgan.gif', store_produce_image_dir=store_produce_image_dir)
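
train_step relies on generator_loss and discriminator_loss helpers. A sketch following the standard TensorFlow DCGAN tutorial pattern (an assumption about this repo; it presumes the discriminator outputs logits):

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)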
Example #8
        weighted_structure_loss = torch.sum(
            torch.mul(normalized_real_eig_vals, structure_loss))
        return magnitude_loss + weighted_structure_loss

    netG = Generator(ngpu).to(device)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netC = AlexNet(ngpu).to(device)
    netC.load_state_dict(torch.load('./best_model.pth'))
    print(netC)
    netC.eval()

    netD = Discriminator(ngpu).to(device)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()
    criterion_sum = nn.BCELoss(reduction='sum')

    fixed_noise = torch.randn(opt.batchSize, 100, 1, 1, device=device)

    real_label = 1
    fake_label = 0

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
Example #9
def train_dcgan_main(BATCH_SIZE, EPOCHS, noise_dim, num_examples_to_generate, checkpoint_dir, store_produce_image_dir):
    # Notice the use of `tf.function`
    # This annotation causes the function to be "compiled".
    @tf.function
    def train_step(images):
        noise = tf.random.normal([BATCH_SIZE, noise_dim])

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_images = generator(noise, training=True)

            real_output = discriminator(images, training=True)
            fake_output = discriminator(generated_images, training=True)

            gen_loss = generator_loss(fake_output)
            disc_loss = discriminator_loss(real_output, fake_output)

        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

    def train(dataset, epochs):
        for epoch in range(epochs):
            start = time.time()

            for image_batch in dataset:
                train_step(image_batch)

            # Produce images for the GIF as we go
            # display.clear_output(wait=True)
            generate_and_save_images(generator, epoch + 1, seed, store_produce_image_dir)

            # Save the model every 5 epochs
            if (epoch + 1) % 5 == 0:
                checkpoint.save(file_prefix=checkpoint_prefix)

            print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

        # Generate after the final epoch
        # display.clear_output(wait=True)
        generate_and_save_images(generator, epochs, seed, store_produce_image_dir)

    # prepare data
    train_dataset = get_mnist_dataset(BATCH_SIZE, BUFFER_SIZE=60000)

    # create model
    generator = Generator()
    discriminator = Discriminator()

    generator_optimizer = tf.keras.optimizers.Adam(1e-4)
    discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                     discriminator_optimizer=discriminator_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)



    # We will reuse this seed over time (so it's easier
    # to visualize progress in the animated GIF)
    seed = tf.random.normal([num_examples_to_generate, noise_dim])

    # train model
    train(train_dataset, EPOCHS)

    # produce images to gif file
    images_to_gif(anim_file='dcgan.gif', store_produce_image_dir=store_produce_image_dir)
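
Both TensorFlow examples end by calling images_to_gif. A sketch using imageio (the assumption being that generate_and_save_images writes PNG frames into store_produce_image_dir):

import glob
import os

import imageio.v2 as imageio

def images_to_gif(anim_file, store_produce_image_dir):
    filenames = sorted(glob.glob(os.path.join(store_produce_image_dir, '*.png')))
    with imageio.get_writer(anim_file, mode='I') as writer:
        for filename in filenames:
            writer.append_data(imageio.imread(filename))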
Example #10
            filename, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(filename))

    return G_model, D_model, G_optimizer, D_optimizer, start_epoch, params


# Create the generator
netG = Generator(params).to(device)
# Apply the weights_init function to randomly initialize all weights to mean=0, stdev=0.02.
netG.apply(weights_init)
# Print the model
print(netG)

# Create the Discriminator
netD = Discriminator(params).to(device)
# Apply the weights_init function to randomly initialize all weights to mean=0, stdev=0.02.
netD.apply(weights_init)
# Print the model
print(netD)

# Create batch of latent vectors that we will use to visualize the progression of the generator
fixed_noise = torch.randn(64, params['nz'], 1, 1, device=device)

# Establish convention for real and fake labels during training
real_label = 1
fake_label = 0
# Initialize BCELoss function
criterion = nn.BCELoss()
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(),