def __init__(self):
        """Build and wire a side-to-front face GAN.

        Creates the data generator, builds and compiles the
        discriminator (binary cross-entropy) and generator (VGG19
        content loss), saves the generator architecture as JSON, and
        stacks generator + frozen discriminator into ``self.combined``
        so that training the combined model updates only the generator.

        NOTE(review): relies on module-level globals (X_train, Y_train,
        batch_size, time, DataGenerator, Keras symbols) being defined
        before instantiation -- confirm against the surrounding module.
        """
        # Load data
        self.datagenerator = DataGenerator(X_train,
                                           Y_train,
                                           batch_size=batch_size)

        # Parameters: input image geometry.
        self.height = 224
        self.width = 224
        self.channels = 3

        self.optimizer = Adam(lr=0.0002, beta_1=0.5)

        self.n_show_image = 1  # Number of images to show
        self.history = []  # per-batch training records
        self.number = 1  # running counter used when naming saved images
        # NOTE(review): str(time) embeds the module's repr in the path;
        # time.time() may have been intended -- confirm.
        self.save_path = 'D:/Generated Image/Training' + str(time) + '/'

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizer,
                                   metrics=['accuracy'])

        # Build and compile the generator (VGG19 content loss)
        self.generator = self.build_generator()
        self.generator.compile(loss=self.vgg19_loss, optimizer=self.optimizer)

        # Save the generator architecture as .json
        generator_model_json = self.generator.to_json()

        # Check folder presence
        if not os.path.isdir(self.save_path + 'Json/'):
            os.makedirs(self.save_path + 'Json/')

        with open(self.save_path + 'Json/generator_model.json',
                  "w") as json_file:
            json_file.write(generator_model_json)

        # The generator takes an input image and generates an image
        z = Input(shape=(self.height, self.width, self.channels))
        image = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        valid = self.discriminator(image)

        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, [image, valid])
        self.combined.compile(loss=[self.vgg19_loss, 'binary_crossentropy'],
                              loss_weights=[1., 1e-3],
                              optimizer=self.optimizer)
def __init__(self):
        """Build a conditional (pix2pix-style) GAN.

        Stores externally supplied hyper-parameters, builds a
        discriminator that scores (front, image) pairs, and stacks the
        generator with the frozen discriminator into ``self.combined``
        trained with MAE (weight 100) + MSE (weight 1).

        NOTE(review): every value on the right-hand side below (height,
        width, channels, batch_size, epochs, line, n_show_image, vgg,
        optimizerD, optimizerC, X_train, Y_train, X_predict, number) is
        a module-level global -- confirm they exist before this runs.
        """
        # Externally supplied hyper-parameters.
        self.height = height
        self.width = width
        self.channels = channels
        self.batch_size = batch_size
        self.epochs = epochs
        self.line = line
        self.n_show_image = n_show_image
        self.vgg = vgg
        self.optimizerD = optimizerD
        self.optimizerC = optimizerC
        # Training and prediction data generators.
        self.DG = DataGenerator(X_train, Y_train, batch_size=batch_size)
        self.DGP = DataGenerator_predict(X_predict, batch_size=batch_size)
        self.number = number

        # PatchGAN-style output geometry: one patch per half-resolution
        # cell (height / 2**1).
        patch = int(self.height / 2**1)
        self.disc_patch = (patch, patch, 3)

        # Build and compile the discriminator.
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizerD)

        self.generator = self.build_generator()

        # For the combined model we only train the generator.
        self.discriminator.trainable = False

        side = Input(shape=(self.height, self.width, self.channels))
        front = Input(shape=(self.height, self.width, self.channels))
        image = self.generator(side)

        # Conditional discriminator: scores the generated image paired
        # with the real front image.
        valid = self.discriminator([front, image])

        # Stacked generator + frozen discriminator.
        self.combined = Model([side, front], [image, valid])
        self.combined.compile(loss=['mae', "mse"],
                              loss_weights=[100, 1],
                              optimizer=self.optimizerC)
class DCGAN():
    """Side-face -> front-face GAN.

    * Generator: conv/PReLU/max-pool encoder, 16 chained residual
      blocks with a long skip connection, 3 up-sampling blocks, tanh
      output; compiled with a VGG19 content loss.
    * Discriminator: frozen VGGFace VGG16 trunk with a sigmoid head.
    * Combined model: generator + frozen discriminator, trained so the
      generator both matches VGG19 content and fools the discriminator.

    NOTE(review): relies on module-level globals (X_train, Y_train,
    batch_size, time, DataGenerator, Keras symbols, VGGFace, tqdm, np,
    plt, os) being defined/imported before instantiation.
    """

    def __init__(self):
        # Load the (side_image, front_image) training batches.
        self.datagenerator = DataGenerator(X_train,
                                           Y_train,
                                           batch_size=batch_size)

        # Parameters: input image geometry.
        self.height = 224
        self.width = 224
        self.channels = 3

        self.optimizer = Adam(lr=0.0002, beta_1=0.5)

        self.n_show_image = 1  # Number of images to show
        self.history = []  # per-batch (epoch, batch, acc%, d_loss, g_loss)
        self.number = 1  # running counter used when naming saved images
        # NOTE(review): str(time) embeds the module's repr in the path;
        # time.time() may have been intended -- confirm.
        self.save_path = 'D:/Generated Image/Training' + str(time) + '/'

        # Build and compile the discriminator.
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizer,
                                   metrics=['accuracy'])

        # Build and compile the generator (VGG19 content loss).
        self.generator = self.build_generator()
        self.generator.compile(loss=self.vgg19_loss, optimizer=self.optimizer)

        # Persist the generator architecture as JSON.
        generator_model_json = self.generator.to_json()

        # Check folder presence.
        if not os.path.isdir(self.save_path + 'Json/'):
            os.makedirs(self.save_path + 'Json/')

        with open(self.save_path + 'Json/generator_model.json',
                  "w") as json_file:
            json_file.write(generator_model_json)

        # The generator maps a side-face image to a front-face image.
        z = Input(shape=(self.height, self.width, self.channels))
        image = self.generator(z)

        # For the combined model we will only train the generator.
        self.discriminator.trainable = False

        # The discriminator scores the generated image.
        valid = self.discriminator(image)

        # Stacked generator + discriminator: trains the generator to
        # fool the discriminator while matching VGG19 content.
        self.combined = Model(z, [image, valid])
        self.combined.compile(loss=[self.vgg19_loss, 'binary_crossentropy'],
                              loss_weights=[1., 1e-3],
                              optimizer=self.optimizer)

        # self.combined.summary()

    def residual_block(self, layer, filters, kernel_size, strides):
        """Conv-BN-PReLU-Conv-BN with an identity skip connection."""
        shortcut = layer

        out = Conv2D(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides,
                     padding='same')(shortcut)
        out = BatchNormalization(momentum=0.5)(out)
        # Parametric ReLU with alphas shared over the spatial axes.
        out = PReLU(alpha_initializer='zeros',
                    alpha_regularizer=None,
                    alpha_constraint=None,
                    shared_axes=[1, 2])(out)
        out = Conv2D(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides,
                     padding='same')(out)
        out = BatchNormalization(momentum=0.5)(out)

        return add([shortcut, out])

    def up_sampling_block(self, layer, filters, kernel_size, strides):
        """Conv followed by 2x up-sampling and LeakyReLU.

        Conv2DTranspose would be the single-step alternative to the
        Conv2D + UpSampling2D pair used here.
        """
        layer = Conv2D(filters=filters,
                       kernel_size=kernel_size,
                       strides=strides,
                       padding='same')(layer)
        layer = UpSampling2D(size=(2, 2))(layer)
        layer = LeakyReLU(alpha=0.2)(layer)

        return layer

    def vgg19_loss(self, true, prediction):
        """VGG19 content loss.

        Mean squared difference between the frozen 'block5_conv4'
        feature maps of the target and the prediction.
        """
        vgg19 = VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=(self.height, self.width, self.channels))
        # Freeze the backbone so the loss never trains it.
        vgg19.trainable = False
        for layer in vgg19.layers:
            layer.trainable = False

        model = Model(inputs=vgg19.input,
                      outputs=vgg19.get_layer('block5_conv4').output)
        model.trainable = False

        return K.mean(K.square(model(true) - model(prediction)))

    def build_generator(self):
        """Build the side->front generator network."""
        generator_input = Input(shape=(self.height, self.width, self.channels))

        # Encoder: three conv/PReLU/max-pool stages (16 -> 32 -> 64).
        generator_layer = generator_input
        for n_filters in (16, 32, 64):
            generator_layer = Conv2D(filters=n_filters,
                                     kernel_size=(2, 2),
                                     strides=(1, 1),
                                     padding='same')(generator_layer)
            generator_layer = PReLU(alpha_initializer='zeros',
                                    alpha_regularizer=None,
                                    alpha_constraint=None,
                                    shared_axes=[1, 2])(generator_layer)
            generator_layer = MaxPooling2D(pool_size=(2, 2))(generator_layer)

        previous_output = generator_layer

        # 16 chained residual blocks.
        for _ in range(16):
            generator_layer = self.residual_block(layer=generator_layer,
                                                  filters=64,
                                                  kernel_size=(3, 3),
                                                  strides=(1, 1))

        generator_layer = Conv2D(filters=64,
                                 kernel_size=(3, 3),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.5)(generator_layer)
        # Long skip connection around the whole residual stack.
        generator_layer = add([previous_output, generator_layer])

        # Three up-sampling blocks restore the pooled resolution
        # (the old comment said 2, but the loop has always run 3 times).
        for _ in range(3):
            generator_layer = self.up_sampling_block(layer=generator_layer,
                                                     filters=256,
                                                     kernel_size=3,
                                                     strides=1)

        generator_layer = Conv2D(filters=self.channels,
                                 kernel_size=(9, 9),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_output = Activation('tanh')(generator_layer)

        # model.summary() available on the returned model if needed.
        return Model(inputs=generator_input, outputs=generator_output)

    def build_discriminator(self):
        """Frozen VGGFace VGG16 trunk with a sigmoid real/fake head."""
        vgg16_layer = VGGFace(include_top=False,
                              model='vgg16',
                              weights='vggface',
                              input_shape=(self.height, self.width,
                                           self.channels))
        vgg16_layer.trainable = False

        vgg16_last_layer = vgg16_layer.get_layer('pool5').output
        layer = Flatten()(vgg16_last_layer)
        discriminator_output = Dense(1, activation='sigmoid')(layer)

        return Model(inputs=vgg16_layer.input, outputs=discriminator_output)

    def train(self, epochs, batch_size, save_interval):
        """Run adversarial training.

        Args:
            epochs: number of passes over the data generator.
            batch_size: number of samples per batch (must match the
                generator's batch size; used for the label tensors).
            save_interval: save sample images and generator weights
                every this many epochs.  (BUG FIX: the original ignored
                this parameter and hard-coded 10.)
        """
        # Adversarial ground truths.
        fake = np.zeros((batch_size, 1))
        real = np.ones((batch_size, 1))

        print('Training')

        for k in range(1, epochs + 1):
            # BUG FIX: valid Sequence indices are 0 .. len-1; the
            # original iterated 1 .. len and over-ran by one batch.
            for l in tqdm(range(self.datagenerator.__len__())):
                # Select a batch of (side, front) images.
                side_image, front_image = self.datagenerator.__getitem__(l)

                generated_image = self.generator.predict(side_image)

                self.discriminator.trainable = True

                # Train the discriminator (real -> 1, generated -> 0).
                discriminator_fake_loss = self.discriminator.train_on_batch(
                    generated_image, fake)
                discriminator_real_loss = self.discriminator.train_on_batch(
                    front_image, real)
                discriminator_loss = 0.5 * np.add(discriminator_fake_loss,
                                                  discriminator_real_loss)

                self.discriminator.trainable = False

                # Train the generator (wants the discriminator to
                # mistake generated images for real ones).
                generator_loss = self.combined.train_on_batch(
                    side_image, [front_image, real])

                # Report progress; generator_loss[2] is the adversarial
                # (binary cross-entropy) component of the combined loss.
                print(
                    '\nTraining epoch : %d \nTraining batch : %d \nAccuracy of discriminator : %.2f%% \nLoss of discriminator : %f \nLoss of generator : %f '
                    % (k, l, discriminator_loss[1] * 100,
                       discriminator_loss[0], generator_loss[2]))

                record = (k, l, discriminator_loss[1] * 100,
                          discriminator_loss[0], generator_loss[2])
                self.history.append(record)

            # At every save interval: save image samples and weights.
            if k % save_interval == 0:
                self.save_image(front_image=front_image,
                                number=k,
                                side_image=side_image,
                                save_path=self.save_path)

                # Check folder presence.
                if not os.path.isdir(self.save_path + 'H5/'):
                    os.makedirs(self.save_path + 'H5/')
                # BUG FIX: write into the H5/ folder that was created
                # (the original created it but saved next to it).
                self.generator.save(self.save_path +
                                    'H5/generator_epoch_%d.h5' % k)
                self.generator.save_weights(
                    self.save_path + 'H5/generator_weights_epoch_%d.h5' % k)

        self.history = np.array(self.history)

        # BUG FIX: the original passed the undefined name 'save_path'
        # here, raising NameError at the end of training.
        self.graph(history=self.history, save_path=self.save_path)

    def save_image(self, front_image, number, side_image, save_path):
        """Save one comparison figure per batch item under save_path.

        Each figure shows, left to right: original side image, original
        front image, generated front image.
        """
        # Rescale generated images from [-1, 1] to [0, 1] for imshow.
        generated_image = 0.5 * self.generator.predict(side_image) + 0.5

        # Rescale reference images from [-1, 1] to [0, 255] uint8.
        front_image = (127.5 * (front_image + 1)).astype(np.uint8)
        side_image = (127.5 * (side_image + 1)).astype(np.uint8)

        # BUG FIX: derive the batch size from the data instead of the
        # module-level 'batch_size' global.
        for m in range(side_image.shape[0]):
            plt.figure(figsize=(8, 2))

            # Adjust the interval between sub-plots.
            plt.subplots_adjust(wspace=0.6)

            for n in range(self.n_show_image):
                generated_image_plot = plt.subplot(
                    1, 3, n + 1 + (2 * self.n_show_image))
                generated_image_plot.set_title('Generated image (front image)')

                if self.channels == 1:
                    plt.imshow(generated_image[m, :, :, 0], cmap='gray')
                else:
                    plt.imshow(generated_image[m, :, :, :])

                original_front_face_image_plot = plt.subplot(
                    1, 3, n + 1 + self.n_show_image)
                original_front_face_image_plot.set_title(
                    'Origninal front image')

                if self.channels == 1:
                    plt.imshow(front_image[m].reshape(self.height, self.width),
                               cmap='gray')
                else:
                    plt.imshow(front_image[m])

                original_side_face_image_plot = plt.subplot(1, 3, n + 1)
                original_side_face_image_plot.set_title('Origninal side image')

                if self.channels == 1:
                    plt.imshow(side_image[m].reshape(self.height, self.width),
                               cmap='gray')
                else:
                    plt.imshow(side_image[m])

                # Hide the x/y axes on every sub-plot.
                generated_image_plot.axis('off')
                original_front_face_image_plot.axis('off')
                original_side_face_image_plot.axis('off')

                self.number += 1

            # Check folder presence.
            if not os.path.isdir(save_path):
                os.makedirs(save_path)

            save_name = 'Train%d_Batch%d_%d.png' % (number, m, self.number)
            save_name = os.path.join(save_path, save_name)

            plt.savefig(save_name)
            plt.close()

    def graph(self, history, save_path):
        """Plot training curves and save them as History.png.

        Columns of `history`: 2 = discriminator accuracy (%),
        3 = discriminator loss, 4 = generator loss.
        """
        # Use the passed-in history (the original ignored the argument
        # and read self.history, which happens to be the same array).
        plt.plot(history[:, 2])
        plt.plot(history[:, 3])
        plt.plot(history[:, 4])
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Generative adversarial network')
        plt.legend([
            'Accuracy of discriminator', 'Loss of discriminator',
            'Loss of generator'
        ],
                   loc='upper left')

        figure = plt.gcf()

        # Check folder presence.
        if not os.path.isdir(save_path):
            os.makedirs(save_path)

        save_name = os.path.join(save_path, 'History.png')

        figure.savefig(save_name)
        plt.close()
# Example #4
# 0
class GAN():
    def __init__(self):
        """Build a two-stage GAN.

        Stage 1 (frontalization): generator decoding frozen VGGFace
        VGG16 features, discriminator on a frozen VGGFace ResNet50.
        Stage 2 (super-resolution): residual generator compiled with a
        VGG19 content loss, plus its own discriminator.  Each stage is
        stacked with its frozen discriminator into a combined model.

        NOTE(review): relies on module-level globals (X_train, Y_train,
        batch_size, time, DataGenerator, Keras symbols, VGGFace) being
        defined before instantiation.
        """
        # Load data
        self.datagenerator = DataGenerator(X_train,
                                           Y_train,
                                           batch_size=batch_size)

        # Parameters: input image geometry.
        self.height = 224
        self.width = 224
        self.channels = 3

        self.optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999)

        # Frozen VGGFace backbones shared by the sub-models below.
        self.vgg16 = self.build_vgg16()
        self.resnet50 = self.build_resnet50()

        self.n_show_image = 1  # Number of images to show
        self.history = []  # per-batch training records
        self.number = 1  # running counter used when naming saved images
        # NOTE(review): str(time) embeds the module's repr in the path;
        # time.time() may have been intended -- confirm.
        self.save_path = 'D:/Generated Image/Training' + str(time) + '/'

        # Build and compile the discriminators.
        self.frontalization_discriminator = self.build_frontalization_discriminator(
        )
        self.frontalization_discriminator.compile(loss='binary_crossentropy',
                                                  optimizer=self.optimizer,
                                                  metrics=['accuracy'])

        self.resolution_discriminator = self.build_resolution_discriminator()
        self.resolution_discriminator.compile(loss='binary_crossentropy',
                                              optimizer=self.optimizer,
                                              metrics=['accuracy'])

        # Build the frontalization generator (trained only through the
        # stacked GAN below, so it is not compiled on its own).
        self.frontalization_generator = self.build_frontalization_generator()

        # Build and compile the resolution generator (VGG19 content loss).
        self.resolution_generator = self.build_resolution_generator()
        self.resolution_generator.compile(loss=self.vgg19_loss,
                                          optimizer=self.optimizer)

        # Save both generator architectures as .json.
        frontalization_generator_model_json = self.frontalization_generator.to_json(
        )
        resolution_generator_model_json = self.resolution_generator.to_json()

        # Check folder presence
        if not os.path.isdir(self.save_path + 'Json/'):
            os.makedirs(self.save_path + 'Json/')

        with open(self.save_path + 'Json/frontalization_generator_model.json',
                  "w") as json_file:
            json_file.write(frontalization_generator_model_json)

        with open(self.save_path + 'Json/resolution_generator_model.json',
                  "w") as json_file:
            json_file.write(resolution_generator_model_json)

        # The generators take an input image and produce output images.
        z = Input(shape=(self.height, self.width, self.channels))
        frontalization_image = self.frontalization_generator(z)
        resolution_image = self.resolution_generator(z)

        # For the combined models we will only train the generators.
        self.frontalization_discriminator.trainable = False
        self.resolution_discriminator.trainable = False

        # The discriminators score the generated images.
        frontalization_valid = self.frontalization_discriminator(
            frontalization_image)
        resolution_valid = self.resolution_discriminator(resolution_image)

        # Stacked generator + frozen discriminator models: train the
        # generators to fool the discriminators.
        # NOTE(review): this single 'binary_crossentropy' is applied to
        # BOTH outputs (generated image AND validity score) -- confirm
        # an image loss such as 'mae' was not intended for the first.
        self.frontalization_gan = Model(
            z, [frontalization_image, frontalization_valid])
        self.frontalization_gan.compile(loss='binary_crossentropy',
                                        optimizer=self.optimizer)

        # self.frontalization_gan.summary()

        self.resolution_gan = Model(z, [resolution_image, resolution_valid])
        self.resolution_gan.compile(
            loss=[self.vgg19_loss, 'binary_crossentropy'],
            loss_weights=[1., 1e-3],
            optimizer=self.optimizer)

        # self.resolution_gan.summary()

    def residual_block(self, layer, filters, kernel_size, strides):
        """Conv-BN-PReLU-Conv-BN with an identity skip connection."""
        skip = layer

        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(skip)
        x = BatchNormalization(momentum=0.5)(x)
        # Parametric ReLU with alphas shared over the spatial axes.
        x = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)
        x = BatchNormalization(momentum=0.5)(x)

        return add([skip, x])

    # VGG content loss used when compiling the resolution models.
    def vgg19_loss(self, true, prediction):
        """Mean squared difference between the frozen VGG19
        'block5_conv4' feature maps of target and prediction."""
        backbone = VGG19(include_top=False,
                         weights='imagenet',
                         input_shape=(self.height, self.width, self.channels))
        # Freeze the backbone so the loss never trains it.
        backbone.trainable = False
        for vgg_layer in backbone.layers:
            vgg_layer.trainable = False

        extractor = Model(inputs=backbone.input,
                          outputs=backbone.get_layer('block5_conv4').output)
        extractor.trainable = False

        return K.mean(K.square(extractor(true) - extractor(prediction)))

    def build_vgg16(self):
        """Return a frozen VGGFace VGG16 trunk (no classification top)."""
        backbone = VGGFace(include_top=False,
                           model='vgg16',
                           weights='vggface',
                           input_shape=(self.height, self.width,
                                        self.channels))

        # Freeze the model and every individual layer.
        backbone.trainable = False
        for lyr in backbone.layers:
            lyr.trainable = False

        # backbone.summary()

        return backbone

    def build_resnet50(self):
        """Return a frozen VGGFace ResNet50 trunk (no classification top)."""
        backbone = VGGFace(include_top=False,
                           model='resnet50',
                           weights='vggface',
                           input_shape=(self.height, self.width,
                                        self.channels))

        # Freeze the model and every individual layer.
        backbone.trainable = False
        for lyr in backbone.layers:
            lyr.trainable = False

        # backbone.summary()

        return backbone

    def build_frontalization_generator(self):
        """Decoder on top of the frozen VGG16 'pool5' features.

        Five stride-2 transposed convolutions upsample back to image
        resolution; the first four use BN + LeakyReLU, the last maps to
        image channels and is finished with tanh.
        """
        x = self.vgg16.get_layer('pool5').output

        # Four upsampling stages: 512 -> 256 -> 128 -> 64 filters.
        for n_filters in (512, 256, 128, 64):
            x = Conv2DTranspose(filters=n_filters,
                                kernel_size=(4, 4),
                                strides=(2, 2),
                                padding='same')(x)
            x = BatchNormalization(momentum=0.8)(x)
            x = LeakyReLU(alpha=0.2)(x)

        # Final stage maps to the image channel count.
        x = Conv2DTranspose(filters=self.channels,
                            kernel_size=(4, 4),
                            strides=(2, 2),
                            padding='same')(x)

        output_image = Activation('tanh')(x)

        # generator.summary() available on the returned model.

        return Model(inputs=self.vgg16.input, outputs=output_image)

    def build_frontalization_discriminator(self):
        """Sigmoid real/fake head on top of the frozen VGGFace
        ResNet50 'avg_pool' features."""
        features = self.resnet50.get_layer('avg_pool').output

        flat = Flatten()(features)
        score = Dense(units=1, activation='sigmoid')(flat)

        # discriminator.summary() available on the returned model.

        return Model(inputs=self.resnet50.input, outputs=score)

    def build_resolution_generator(self):
        """Build the super-resolution generator.

        Architecture: three conv/PReLU/max-pool encoder stages
        (16 -> 32 -> 64 filters), 16 chained residual blocks with a
        long skip connection, three conv/upsample/LeakyReLU stages
        (256 filters) back to input resolution, and a final 9x9 conv
        to image channels with tanh.
        """
        generator_input = Input(shape=(self.height, self.width, self.channels))

        # Encoder: three conv/PReLU/max-pool stages.
        generator_layer = generator_input
        for n_filters in (16, 32, 64):
            generator_layer = Conv2D(filters=n_filters,
                                     kernel_size=(2, 2),
                                     strides=(1, 1),
                                     padding='same')(generator_layer)
            generator_layer = PReLU(alpha_initializer='zeros',
                                    alpha_regularizer=None,
                                    alpha_constraint=None,
                                    shared_axes=[1, 2])(generator_layer)
            generator_layer = MaxPooling2D(pool_size=(2, 2))(generator_layer)

        previous_output = generator_layer

        # BUG FIX: the original loop computed residual_block from the
        # SAME input 16 times without feeding the result back, so only
        # one residual block took effect.  Chain them (matching the
        # sibling DCGAN.build_generator) and keep the long skip.
        for _ in range(16):
            generator_layer = self.residual_block(layer=generator_layer,
                                                  filters=64,
                                                  kernel_size=(3, 3),
                                                  strides=(1, 1))

        generator_layer = add([previous_output, generator_layer])

        # Decoder: three conv/upsample/LeakyReLU stages.
        for _ in range(3):
            generator_layer = Conv2D(filters=256,
                                     kernel_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same')(generator_layer)
            generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
            generator_layer = LeakyReLU(alpha=0.2)(generator_layer)

        generator_layer = Conv2D(filters=self.channels,
                                 kernel_size=(9, 9),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)

        generator_output = Activation('tanh')(generator_layer)

        # generator.summary() available on the returned model.

        return Model(inputs=generator_input, outputs=generator_output)

    def build_resolution_discriminator(self):
        """Build the super-resolution discriminator.

        Stacks Conv2D / BatchNorm / LeakyReLU blocks (alternating stride 2
        and stride 1) and ends in a single sigmoid unit that scores an
        input image as real (1) or generated (0).

        Returns:
            keras ``Model`` mapping a (height, width, channels) image to a
            scalar probability.
        """
        discriminator_input = Input(shape=(self.height, self.width,
                                           self.channels))

        discriminator_layer = Conv2D(filters=64,
                                     kernel_size=(3, 3),
                                     strides=(2, 2),
                                     padding='same')(discriminator_input)
        discriminator_layer = BatchNormalization(
            momentum=0.5)(discriminator_layer)
        discriminator_layer = LeakyReLU(alpha=0.2)(discriminator_layer)
        discriminator_layer = Conv2D(filters=128,
                                     kernel_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same')(discriminator_layer)
        discriminator_layer = BatchNormalization(
            momentum=0.5)(discriminator_layer)
        discriminator_layer = LeakyReLU(alpha=0.2)(discriminator_layer)
        # BUG FIX: the 256- and 512-filter stride-2 blocks below previously
        # consumed `discriminator_input` again, silently discarding every
        # layer built so far; they now chain on `discriminator_layer`.
        discriminator_layer = Conv2D(filters=256,
                                     kernel_size=(3, 3),
                                     strides=(2, 2),
                                     padding='same')(discriminator_layer)
        discriminator_layer = BatchNormalization(
            momentum=0.5)(discriminator_layer)
        discriminator_layer = LeakyReLU(alpha=0.2)(discriminator_layer)
        discriminator_layer = Conv2D(filters=512,
                                     kernel_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same')(discriminator_layer)
        discriminator_layer = BatchNormalization(
            momentum=0.5)(discriminator_layer)
        discriminator_layer = LeakyReLU(alpha=0.2)(discriminator_layer)
        discriminator_layer = Conv2D(filters=512,
                                     kernel_size=(3, 3),
                                     strides=(2, 2),
                                     padding='same')(discriminator_layer)
        discriminator_layer = BatchNormalization(
            momentum=0.5)(discriminator_layer)
        discriminator_layer = LeakyReLU(alpha=0.2)(discriminator_layer)
        discriminator_layer = Flatten()(discriminator_layer)
        discriminator_layer = Dense(units=1)(discriminator_layer)

        discriminator_output = Activation('sigmoid')(discriminator_layer)

        discriminator = Model(inputs=discriminator_input,
                              outputs=discriminator_output)

        # discriminator.summary()

        return discriminator

    def train(self, epochs, batch_size, save_interval):
        """Alternately train both discriminators, then both generator stacks.

        Args:
            epochs: number of passes over the data generator.
            batch_size: rows in the real/fake label vectors; must match the
                data generator's batch size.
            save_interval: save sample images and .h5 weights every this
                many epochs.
        """
        # Adversarial ground truths with one-sided label smoothing:
        # fake targets in [0, 0.2), real targets in (0.8, 1].
        fake = np.random.random_sample((batch_size, 1)) * 0.2
        real = np.ones((batch_size, 1)) - np.random.random_sample(
            (batch_size, 1)) * 0.2

        print('Training')

        for l in range(1, epochs + 1):
            for m in tqdm(range(1, self.datagenerator.__len__() + 1)):
                # Select images.
                # BUG FIX: the batch index is the inner counter m, not the
                # epoch counter l; indexing with l - 1 re-used one fixed
                # batch for the entire epoch.
                side_image, front_image = self.datagenerator.__getitem__(m - 1)

                frontalization_generated_image = self.frontalization_generator.predict(
                    side_image)

                # BUG FIX: the resolution generator consumes frontalized
                # images (see resolution_gan's training input below and
                # save_image), so its fake samples for the discriminator
                # must come from the frontalization output, not the raw
                # side image.
                resolution_generated_image = self.resolution_generator.predict(
                    frontalization_generated_image)

                self.frontalization_discriminator.trainable = True
                self.resolution_discriminator.trainable = True

                # Train the discriminators (real classified as ~1 and generated as ~0)
                frontalization_discriminator_fake_loss = self.frontalization_discriminator.train_on_batch(
                    frontalization_generated_image, fake)
                frontalization_discriminator_real_loss = self.frontalization_discriminator.train_on_batch(
                    front_image, real)
                frontalization_discriminator_loss = 0.5 * np.add(
                    frontalization_discriminator_fake_loss,
                    frontalization_discriminator_real_loss)

                resolution_discriminator_fake_loss = self.resolution_discriminator.train_on_batch(
                    resolution_generated_image, fake)
                resolution_discriminator_real_loss = self.resolution_discriminator.train_on_batch(
                    front_image, real)
                resolution_discriminator_loss = 0.5 * np.add(
                    resolution_discriminator_fake_loss,
                    resolution_discriminator_real_loss)

                self.frontalization_discriminator.trainable = False
                self.resolution_discriminator.trainable = False

                # Train the generators (they want the discriminators to
                # score their output as real).
                frontalization_generator_loss = self.frontalization_gan.train_on_batch(
                    side_image, [front_image, real])

                resolution_generator_loss = self.resolution_gan.train_on_batch(
                    frontalization_generated_image, [front_image, real])

                # Plot the progress
                print(
                    '\nTraining epoch : %d \nTraining batch : %d \nAccuracy of discriminator : %.2f%% \nLoss of discriminator : %f \nLoss of generator : %f '
                    % (l, m, frontalization_discriminator_loss[1] * 100,
                       frontalization_discriminator_loss[0],
                       frontalization_generator_loss[2]))

                record = (l, m, frontalization_discriminator_loss[1] * 100,
                          frontalization_discriminator_loss[0],
                          frontalization_generator_loss[2])
                self.history.append(record)

            # Reshuffle the generator's ordering for the next epoch.
            self.datagenerator.on_epoch_end()

            # Save generated images and .h5 weights at the save interval.
            if l % save_interval == 0:
                self.save_image(front_image=front_image,
                                side_image=side_image,
                                epoch_number=l,
                                batch_number=m,
                                save_path=self.save_path)

                # Check folder presence
                if not os.path.isdir(self.save_path + 'H5/'):
                    os.makedirs(self.save_path + 'H5/')

                self.frontalization_generator.save(self.save_path + 'H5/' +
                                                   'generator_epoch_%d.h5' % l)
                self.frontalization_generator.save_weights(
                    self.save_path + 'H5/' +
                    'generator_weights_epoch_%d.h5' % l)
                self.resolution_generator.save(self.save_path + 'H5/' +
                                               'resolution_epoch_%d.h5' % l)
                self.resolution_generator.save_weights(
                    self.save_path + 'H5/' +
                    'resolution_weights_epoch_%d.h5' % l)

        self.history = np.array(self.history)

        self.graph(history=self.history, save_path=self.save_path + 'History/')

    def save_image(self, front_image, side_image, epoch_number, batch_number,
                   save_path):
        """Render side / front / generated triplets and save one PNG per
        batch item under ``save_path``."""
        frontalized = self.frontalization_generator.predict(side_image)

        # Map the resolution generator's tanh output from [-1, 1] to [0, 1]
        # so matplotlib can display it.
        upscaled = 0.5 * self.resolution_generator.predict(frontalized) + 0.5

        # Ground-truth tensors go from [-1, 1] back to uint8 pixel values.
        front_image = (127.5 * (front_image + 1)).astype(np.uint8)
        side_image = (127.5 * (side_image + 1)).astype(np.uint8)

        for sample in range(batch_size):
            plt.figure(figsize=(8, 2))
            plt.subplots_adjust(wspace=0.6)  # widen the gap between panels

            for column in range(self.n_show_image):

                def _panel(position, title, pixels):
                    # One subplot: title + image, axes hidden.
                    axes = plt.subplot(1, 3, position)
                    axes.set_title(title)
                    plt.imshow(pixels)
                    axes.axis('off')

                _panel(column + 1 + (2 * self.n_show_image),
                       'Generated image (front image)',
                       upscaled[sample, :, :, :])
                _panel(column + 1 + self.n_show_image,
                       'Origninal front image', front_image[sample])
                _panel(column + 1, 'Origninal side image', side_image[sample])

                self.number += 1

            # Check folder presence
            if not os.path.isdir(save_path):
                os.makedirs(save_path)

            file_name = os.path.join(
                save_path, 'Train%d_Batch%d_%d.png' %
                (epoch_number, batch_number, self.number))

            plt.savefig(file_name)
            plt.close()

        self.number = 1

    def graph(self, history, save_path):
        """Plot the recorded discriminator accuracy/loss and generator loss
        columns of ``self.history`` and save the figure as History.png."""
        for column in (2, 3, 4):
            plt.plot(self.history[:, column])

        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Generative adversarial network')
        plt.legend([
            'Accuracy of discriminator', 'Loss of discriminator',
            'Loss of generator'
        ],
                   loc='upper left')

        figure = plt.gcf()

        # Check folder presence
        if not os.path.isdir(save_path):
            os.makedirs(save_path)

        figure.savefig(os.path.join(save_path, 'History.png'))
        plt.close()
# ---------------------------------------------------------------------------
# Example #5 (scrape artifact: the original page showed "Example #5" and a
# vote count of 0 here as bare text, which breaks Python parsing)
# ---------------------------------------------------------------------------
class DCGAN():
    """GAN that frontalizes side-view face images.

    Both the generator and the discriminator are built on a SENet-50
    backbone (keras-vggface, VGGFace weights); the combined model trains
    only the generator to fool the discriminator.
    """

    def __init__(self):
        # Load data
        self.datagenerator = DataGenerator(X_train,
                                           Y_train,
                                           batch_size=batch_size)

        # Parameters
        self.height = 224
        self.width = 224
        self.channels = 3

        self.optimizer = Adam(lr=0.0002, beta_1=0.5)

        self.n_show_image = 1  # Number of images to show per panel
        self.history = []
        self.number = 1  # running index used in saved-image file names
        self.save_path = 'D:/Generated Image/Training' + str(time) + '/'

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizer,
                                   metrics=['accuracy'])

        # Build the generator (it is trained only through self.combined)
        self.generator = self.build_generator()

        # Save the generator architecture as .json
        generator_model_json = self.generator.to_json()

        # Check folder presence
        if not os.path.isdir(self.save_path + 'Json/'):
            os.makedirs(self.save_path + 'Json/')

        with open(self.save_path + 'Json/generator_model.json',
                  "w") as json_file:
            json_file.write(generator_model_json)

        # The generator takes side images as input and generates images
        z = Input(shape=(self.height, self.width, self.channels))
        image = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        valid = self.discriminator(image)

        # The combined model (stacked generator and discriminator)
        # trains the generator to fool the discriminator.
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy',
                              optimizer=self.optimizer)

        # self.combined.summary()

    def senet50(self):
        """Return a frozen SENet-50 feature extractor with VGGFace weights."""
        senet50_layer = VGGFace(include_top=False,
                                model='senet50',
                                weights='vggface',
                                input_shape=(self.height, self.width,
                                             self.channels))

        senet50_layer.trainable = False

        # senet50_layer.summary()

        return senet50_layer

    def build_generator(self):
        """Decoder of Conv2DTranspose blocks on top of SENet-50 features.

        Starts from the backbone's 'activation_162' output and upsamples
        back to an image, finishing with a tanh activation.

        Returns:
            keras ``Model`` from the SENet-50 input to the generated image.
        """
        senet50_layer = self.senet50()

        senet50_last_layer = senet50_layer.get_layer('activation_162').output

        generator_layer = Conv2DTranspose(filters=256,
                                          kernel_size=(4, 4),
                                          strides=(1, 1),
                                          padding='valid')(senet50_last_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=32,
                                          kernel_size=(4, 4),
                                          strides=(2, 2),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=64,
                                          kernel_size=(4, 4),
                                          strides=(1, 1),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=64,
                                          kernel_size=(4, 4),
                                          strides=(2, 2),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=64,
                                          kernel_size=(4, 4),
                                          strides=(2, 2),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=64,
                                          kernel_size=(4, 4),
                                          strides=(1, 1),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=64,
                                          kernel_size=(4, 4),
                                          strides=(2, 2),
                                          padding='valid')(generator_layer)
        generator_layer = BatchNormalization(momentum=0.8)(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2DTranspose(filters=self.channels,
                                          kernel_size=(5, 5),
                                          strides=(1, 1),
                                          padding='valid')(generator_layer)

        generator_output = Activation('tanh')(generator_layer)

        generator = Model(inputs=senet50_layer.input, outputs=generator_output)

        # generator.summary()

        return generator

    def build_discriminator(self):
        """Real/fake classifier: SENet-50 features ('activation_81'),
        flattened into a single sigmoid unit."""
        senet50_layer = self.senet50()

        senet50_last_layer = senet50_layer.get_layer('activation_81').output

        discriminator_layer = Flatten()(senet50_last_layer)

        discriminator_output = Dense(units=1,
                                     activation='sigmoid')(discriminator_layer)

        discriminator = Model(inputs=senet50_layer.input,
                              outputs=discriminator_output)

        return discriminator

    def train(self, epochs, batch_size, save_interval):
        """Alternate discriminator and generator updates.

        Args:
            epochs: passes over the data generator.
            batch_size: rows in the real/fake label vectors; must match the
                data generator's batch size.
            save_interval: save sample images every this many batches;
                weights are saved every 5 epochs regardless.
        """
        # Adversarial ground truths
        fake = np.zeros((batch_size, 1))
        real = np.ones((batch_size, 1))

        print('Training')

        for k in range(1, epochs + 1):
            for l in tqdm(range(1, self.datagenerator.__len__() + 1)):
                # Select images.
                # BUG FIX: batch indices are 0-based; `l` runs 1..len(), so
                # __getitem__(l) skipped the first batch and read one past
                # the last.
                side_image, front_image = self.datagenerator.__getitem__(l - 1)

                # Generate a batch of new images
                generated_image = self.generator.predict(side_image)

                self.discriminator.trainable = True

                # Train the discriminator (real classified as ones and generated as zeros)
                discriminator_fake_loss = self.discriminator.train_on_batch(
                    generated_image, fake)
                discriminator_real_loss = self.discriminator.train_on_batch(
                    front_image, real)
                discriminator_loss = 0.5 * np.add(discriminator_fake_loss,
                                                  discriminator_real_loss)

                self.discriminator.trainable = False

                # Train the generator (wants discriminator to mistake images as real)
                generator_loss = self.combined.train_on_batch(side_image, real)

                # Plot the progress
                print(
                    '\nTraining epoch : %d \nTraining batch : %d \nAccuracy of discriminator : %.2f%% \nLoss of discriminator : %f \nLoss of generator : %f '
                    % (k, l, discriminator_loss[1] * 100,
                       discriminator_loss[0], generator_loss))

                record = (k, l, discriminator_loss[1] * 100,
                          discriminator_loss[0], generator_loss)
                self.history.append(record)

                # If at save interval -> save generated image samples
                if l % save_interval == 0:
                    self.save_image(front_image=front_image,
                                    side_image=side_image,
                                    train_number=k,
                                    epoch_number=l,
                                    save_path=self.save_path)

            # NOTE(review): unlike the sibling classes in this file,
            # datagenerator.on_epoch_end() is never called here, so the
            # batch ordering is never reshuffled — confirm this is intended.

            # Save .h5
            if k % 5 == 0:
                # Check folder presence
                if not os.path.isdir(self.save_path + 'H5/'):
                    os.makedirs(self.save_path + 'H5/')

                self.generator.save(self.save_path + 'H5/' +
                                    'generator_epoch_%d.h5' % k)
                self.generator.save_weights(self.save_path + 'H5/' +
                                            'generator_weights_epoch_%d.h5' %
                                            k)

        self.history = np.array(self.history)

        self.graph(history=self.history, save_path=self.save_path + 'History/')

    def save_image(self, front_image, side_image, train_number, epoch_number,
                   save_path):
        """Save a side / front / generated comparison PNG for every item in
        the batch under ``save_path``."""
        # Rescale generated images from [-1, 1] to [0, 1] for matplotlib
        generated_image = 0.5 * self.generator.predict(side_image) + 0.5

        # Ground-truth tensors go from [-1, 1] back to uint8 pixel values
        front_image = (127.5 * (front_image + 1)).astype(np.uint8)
        side_image = (127.5 * (side_image + 1)).astype(np.uint8)

        # Show image (first column : original side image, second column : original front image, third column = generated image(front image))
        for m in range(batch_size):
            plt.figure(figsize=(8, 2))

            # Adjust the interval of the image
            plt.subplots_adjust(wspace=0.6)

            for n in range(self.n_show_image):
                generated_image_plot = plt.subplot(
                    1, 3, n + 1 + (2 * self.n_show_image))
                generated_image_plot.set_title('Generated image (front image)')
                plt.imshow(generated_image[m, :, :, :])

                original_front_face_image_plot = plt.subplot(
                    1, 3, n + 1 + self.n_show_image)
                original_front_face_image_plot.set_title(
                    'Origninal front image')
                plt.imshow(front_image[m])

                original_side_face_image_plot = plt.subplot(1, 3, n + 1)
                original_side_face_image_plot.set_title('Origninal side image')
                plt.imshow(side_image[m])

                # Don't show axis of x and y
                generated_image_plot.axis('off')
                original_front_face_image_plot.axis('off')
                original_side_face_image_plot.axis('off')

                # NOTE(review): self.number is never reset to 1 here (the
                # sibling class resets it after each call), so file names
                # keep counting up across calls — confirm intended.
                self.number += 1

                # plt.show()

            # Check folder presence
            if not os.path.isdir(save_path):
                os.makedirs(save_path)

            save_name = 'Train%d_Batch%d_%d.png' % (train_number, epoch_number,
                                                    self.number)
            save_name = os.path.join(save_path, save_name)

            plt.savefig(save_name)
            plt.close()

    def graph(self, history, save_path):
        """Plot the recorded discriminator accuracy/loss and generator loss
        columns of ``self.history`` and save the figure as History.png."""
        plt.plot(self.history[:, 2])
        plt.plot(self.history[:, 3])
        plt.plot(self.history[:, 4])
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Generative adversarial network')
        plt.legend([
            'Accuracy of discriminator', 'Loss of discriminator',
            'Loss of generator'
        ],
                   loc='upper left')

        figure = plt.gcf()

        # Check folder presence
        if not os.path.isdir(save_path):
            os.makedirs(save_path)

        save_name = os.path.join(save_path, 'History.png')

        figure.savefig(save_name)
        plt.close()
# Globals for the vggGan example below.
# X_train holds side-view image paths, Y_train the paired front-view paths
# (per the DataGenerator usage, which yields (side_image, front_image)).
X_train = glob("D:/X_train/*jpg")
Y_train = glob("D:/Y_train/*jpg")

# X = X / 127.5 - 1
# Y = Y / 127.5 - 1

height = 128
width = 128
channels = 3
z_dimension = 512
batch_size = 32
epochs = 10000
line = 3  # run tag used in the output folder names below
n_show_image = 1
DG = DataGenerator(X_train, Y_train, batch_size=batch_size)

optimizerD = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999)  # discriminator
optimizerG = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999)  # generator/combined

history = []


def conv2d_block(layers,
                 filters,
                 kernel_size=(4, 4),
                 strides=2,
                 momentum=0.8,
                 alpha=0.2):
    """Downsampling block: Conv2D -> BatchNormalization -> LeakyReLU.

    The original body was truncated after ``input = layers`` and returned
    None; completed here to mirror ``vggGan.conv2d_block``, keeping the
    same signature.

    Args:
        layers: input tensor.
        filters: number of convolution filters.
        kernel_size: convolution kernel size.
        strides: convolution stride (2 halves the spatial resolution).
        momentum: batch-normalization momentum.
        alpha: LeakyReLU negative slope.

    Returns:
        The activated output tensor.
    """
    input = layers
    layer = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding="same")(input)
    layer = BatchNormalization(momentum=momentum)(layer)
    output = LeakyReLU(alpha=alpha)(layer)

    return output
class vggGan():
    def __init__(self):
        """Wire up the pix2pix-style GAN: paired discriminator, VGG-based
        generator, and the combined model that trains the generator.

        NOTE(review): `vgg`, `optimizerC`, `number`, `X_predict` and
        `DataGenerator_predict` are not defined anywhere in this file's
        visible globals (only `optimizerD` / `optimizerG` exist, so
        `optimizerC` looks like a typo for `optimizerG`) — confirm they are
        provided elsewhere, otherwise construction raises NameError.
        """
        self.height = height
        self.width = width
        self.channels = channels
        self.batch_size = batch_size
        self.epochs = epochs
        self.line = line
        self.n_show_image = n_show_image
        self.vgg = vgg
        self.optimizerD = optimizerD
        self.optimizerC = optimizerC
        self.DG = DataGenerator(X_train, Y_train, batch_size=batch_size)
        self.DGP = DataGenerator_predict(X_predict, batch_size=batch_size)
        self.number = number

        # The discriminator downsamples once (a single stride-2 conv block)
        # and outputs 3 channels, hence a (height/2, width/2, 3) label patch.
        patch = int(self.height / 2**1)
        self.disc_patch = (patch, patch, 3)

        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizerD)

        self.generator = self.build_generator()

        # Freeze the discriminator inside the combined model so only the
        # generator is updated by combined.train_on_batch.
        self.discriminator.trainable = False

        side = Input(shape=(self.height, self.width, self.channels))
        front = Input(shape=(self.height, self.width, self.channels))
        image = self.generator(side)

        valid = self.discriminator([front, image])

        # L1 reconstruction loss (weight 100) plus adversarial MSE (weight 1).
        self.combined = Model([side, front], [image, valid])
        self.combined.compile(loss=['mae', "mse"],
                              loss_weights=[100, 1],
                              optimizer=self.optimizerC)

    def conv2d_block(self,
                     layers,
                     filters,
                     kernel_size=(4, 4),
                     strides=2,
                     momentum=0.8,
                     alpha=0.2):
        """Downsampling block: Conv2D -> BatchNormalization -> LeakyReLU."""
        convolved = Conv2D(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding="same")(layers)
        normalized = BatchNormalization(momentum=momentum)(convolved)

        return LeakyReLU(alpha=alpha)(normalized)

    def deconv2d_block(self,
                       layers,
                       filters,
                       kernel_size=(4, 4),
                       strides=2,
                       momentum=0.8,
                       alpha=0.2):
        """Upsampling block: Conv2DTranspose -> BatchNormalization -> ReLU.

        ``alpha`` is accepted for symmetry with conv2d_block but unused,
        since the activation here is a plain ReLU.
        """
        upsampled = Conv2DTranspose(filters=filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    padding='same')(layers)
        normalized = BatchNormalization(momentum=momentum)(upsampled)

        return ReLU()(normalized)

    def build_discriminator(self):
        """Patch discriminator over a (front, candidate) image pair.

        The two inputs are concatenated on the channel axis, passed through
        one stride-2 conv block, and mapped to a (h/2, w/2, 3) grid of
        scores (trained against all-ones / all-zeros patches in train()).
        """
        real_branch = Input(shape=(self.height, self.width, self.channels))
        candidate_branch = Input(shape=(self.height, self.width,
                                        self.channels))

        merged = Concatenate(axis=-1)([real_branch, candidate_branch])
        features = self.conv2d_block(merged, 64)
        patch_scores = Conv2D(filters=3,
                              kernel_size=(4, 4),
                              strides=1,
                              padding="same")(features)

        model = Model([real_branch, candidate_branch], patch_scores)
        model.summary()

        return model

    def build_generator(self):
        """U-Net style decoder on top of the VGG encoder.

        Skip connections concatenate VGG conv feature maps into the
        matching decoder stage; every layer up to and including 'pool5' is
        frozen so only the decoder trains.
        """
        bottleneck = self.vgg.get_layer("pool5").output
        skip_2 = self.vgg.get_layer("conv2_2").output
        skip_3 = self.vgg.get_layer("conv3_3").output
        skip_4 = self.vgg.get_layer("conv4_3").output
        skip_5 = self.vgg.get_layer("conv5_3").output

        decoder = self.deconv2d_block(bottleneck, 512)
        decoder = Concatenate(axis=-1)([decoder, skip_5])
        decoder = self.deconv2d_block(decoder, 512)
        decoder = Concatenate(axis=-1)([decoder, skip_4])
        decoder = self.deconv2d_block(decoder, 256)
        decoder = Concatenate(axis=-1)([decoder, skip_3])
        decoder = self.deconv2d_block(decoder, 128)
        decoder = Concatenate(axis=-1)([decoder, skip_2])

        frontal = Conv2DTranspose(filters=3,
                                  kernel_size=(4, 4),
                                  strides=2,
                                  activation='tanh',
                                  padding='same')(decoder)

        model = Model(self.vgg.input, frontal)

        # Freeze the encoder: every layer up to and including 'pool5'.
        for encoder_layer in model.layers:
            encoder_layer.trainable = False
            if encoder_layer.name == "pool5":
                break

        return model

    def train(self, epochs, batch_size, save_interval):
        """Alternate discriminator / generator updates over the paired data
        generator, saving sample images, predictions and weights each epoch.

        Args:
            epochs: number of passes over the data generator.
            batch_size: leading dimension of the real/fake patch labels;
                must match the generator's batch size.
            save_interval: currently unused — only referenced by the
                commented-out per-batch save block; saving happens every
                epoch instead (`epoch % 1 == 0`).
        """
        # Patch-shaped adversarial targets matching the discriminator output.
        fake = np.zeros((batch_size, ) + self.disc_patch)
        real = np.ones((batch_size, ) + self.disc_patch)

        for epoch in range(epochs):
            for batch in range(self.DG.__len__()):
                side_images, front_images = self.DG.__getitem__(batch)

                generated_images = self.generator.predict(side_images)

                # Discriminator: generated pairs as fake, real pairs as real.
                discriminator_fake_loss = self.discriminator.train_on_batch(
                    [front_images, generated_images], fake)
                discriminator_real_loss = self.discriminator.train_on_batch(
                    [front_images, front_images], real)
                discriminator_loss = np.add(discriminator_fake_loss,
                                            discriminator_real_loss)

                # Generator: L1 toward the real front image + fool the
                # (frozen) discriminator.
                generator_loss = self.combined.train_on_batch(
                    [side_images, front_images], [front_images, real])

                # generator_loss[1] is the 'mae' component of the combined loss.
                print(
                    '\nTraining epoch : %d \nTraining batch : %d / %d \nLoss of discriminator : %f \nLoss of generator : %f'
                    % (epoch + 1, batch + 1, self.DG.__len__(),
                       discriminator_loss, generator_loss[1]))

                # if batch % save_interval == 0:
                #     save_path = 'D:/Generated Image/Training' + str(line) + '/'
                #     self.save_image(epoch = epoch, batch = batch, front_image = front_images, side_image = side_images, save_path = save_path)
                # self.generator.save(save_path+"{1}_{0}.h5".format(str(batch), str(line)))

            # End of epoch: save training samples, prediction samples, and
            # the generator weights.
            if epoch % 1 == 0:
                # print(i)
                save_path = 'D:/Generated Image/Training' + str(line) + '/'
                self.save_image(epoch=epoch,
                                batch=batch,
                                front_image=front_images,
                                side_image=side_images,
                                save_path=save_path)

                predict_side_images = self.DGP.__getitem__(0)
                save_path = 'D:/Generated Image/Predict' + str(line) + '/'
                self.save_predict_image(epoch=epoch,
                                        batch=batch,
                                        side_image=predict_side_images,
                                        save_path=save_path)
                self.generator.save(
                    "D:/Generated Image/Predict{2}/{1}_{0}.h5".format(
                        str(epoch), str(line), str(line)))

            # Reshuffle both generators for the next epoch.
            self.DG.on_epoch_end()
            self.DGP.on_epoch_end()

    def save_image(self, epoch, batch, front_image, side_image, save_path):
        """Save one PNG per batch item: side view, real front, generated
        front, laid out side by side."""
        # Map the tanh output from [-1, 1] to [0, 1] for matplotlib.
        generated = (0.5 * self.generator.predict(side_image) + 0.5)

        # Recover uint8 pixels from the [-1, 1] normalized tensors.
        front_pixels = (255 * ((front_image) + 1) / 2).astype(np.uint8)
        side_pixels = (255 * ((side_image) + 1) / 2).astype(np.uint8)

        for item in range(self.batch_size):
            plt.figure(figsize=(8, 2))
            plt.subplots_adjust(wspace=0.6)  # widen the gap between panels

            for slot in range(n_show_image):

                def _panel(position, title, pixels):
                    # One subplot: title + image, axes hidden.
                    axes = plt.subplot(1, 3, position)
                    axes.set_title(title)
                    plt.imshow(pixels)
                    axes.axis('off')

                _panel(slot + 1 + (2 * n_show_image),
                       'Generated image (front image)', generated[item])
                _panel(slot + 1 + n_show_image, 'Origninal front image',
                       front_pixels[item])
                _panel(slot + 1, 'Origninal side image', side_pixels[item])

                self.number += 1

            # Check folder presence
            if not os.path.isdir(save_path):
                os.makedirs(save_path)

            file_name = os.path.join(save_path,
                                     '%d-%d-%d.png' % (epoch, batch, item))

            plt.savefig(file_name)
            plt.close()

    def save_predict_image(self, epoch, batch, side_image, save_path):
        """Save side-view / generated-front pairs for the prediction set,
        one PNG per batch item."""
        # Map the tanh output from [-1, 1] to [0, 1] for matplotlib.
        generated = 0.5 * self.generator.predict(side_image) + 0.5
        side_pixels = (255 * ((side_image) + 1) / 2).astype(np.uint8)

        for item in range(self.batch_size):
            plt.figure(figsize=(8, 2))
            plt.subplots_adjust(wspace=0.6)  # widen the gap between panels

            for slot in range(n_show_image):
                generated_axes = plt.subplot(1, 2, slot + 1 + n_show_image)
                generated_axes.set_title('Generated image (front image)')
                plt.imshow(generated[item])

                side_axes = plt.subplot(1, 2, slot + 1)
                side_axes.set_title('Origninal side image')
                plt.imshow(side_pixels[item])

                # Hide both axes.
                generated_axes.axis('off')
                side_axes.axis('off')

                self.number += 1

            # Check folder presence
            if not os.path.isdir(save_path):
                os.makedirs(save_path)

            file_name = os.path.join(save_path,
                                     '%d-%d-%d.png' % (epoch, batch, item))

            plt.savefig(file_name)
            plt.close()