def CreateDataset(opt):
    """
    Factory for datasets selected by ``opt.dataset_mode``.

    Lazily imports and instantiates the matching dataset class, prints a
    confirmation, initializes the dataset with ``opt`` and returns it.

    Raises:
        ValueError: if ``opt.dataset_mode`` names no known dataset.
    """
    mode = opt.dataset_mode

    # resolve the dataset class first; imports stay lazy so only the
    # selected dataset module is ever loaded
    if mode == 'aligned':
        from datasets.aligned_dataset import AlignedDataset as dataset_cls
    elif mode == 'unaligned':
        from datasets.unaligned_dataset import UnalignedDataset as dataset_cls
    elif mode == 'labeled':
        from datasets.labeled_dataset import LabeledDataset as dataset_cls
    elif mode == 'single':
        from datasets.single_dataset import SingleDataset as dataset_cls
    elif mode == 'reference_hd':
        from datasets.reference_hd_dataset import ReferenceHDDataset as dataset_cls
    elif mode == 'reference_test_hd':
        from datasets.reference_test_hd_dataset import ReferenceTestHDDataset as dataset_cls
    else:
        raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)

    dataset = dataset_cls()
    print("dataset [%s] was created" % (dataset.name()))
    dataset.initialize(opt)
    return dataset
    def __init__(self, opt, training):
        """
        Initialize the model and attach dataset-loading ops to the graph.

        While training, an unpaired dataset supplies batches from both
        domains (attribute names suggest A = low-light, B = normal-light);
        at test time only a single low-light dataset is required.
        """
        BaseModel.__init__(self, opt, training)

        if training:
            self.dataset = UnpairedDataset(opt, training)
            loaders = self.dataset.generate(cacheA='./dataA.tfcache',
                                            cacheB='./dataB.tfcache')
            self.datasetA, self.datasetB = loaders
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.dataB_iter = self.datasetB.make_initializable_iterator()
            self.low = self.dataA_iter.get_next()
            self.normal = self.dataB_iter.get_next()
        else:
            self.dataset = SingleDataset(opt, training)
            self.datasetA = self.dataset.generate()
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.low = self.dataA_iter.get_next()
# --- Example #3 (snippet separator; was the bare lines "예제 #3" / "0",
# which are not valid runnable Python) ---
    def __init__(self, opt, training):
        """
        Initialize the model, attach dataset-loading ops to the graph and
        create placeholders used to feed previously generated fake images.
        """
        BaseModel.__init__(self, opt, training)

        if training:
            # unpaired loaders for both image domains
            self.dataset = UnpairedDataset(opt, training)
            self.datasetA, self.datasetB = self.dataset.generate(
                cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.dataB_iter = self.datasetB.make_initializable_iterator()
            self.realA = self.dataA_iter.get_next()
            self.realB = self.dataB_iter.get_next()
        else:
            # testing only needs domain A inputs
            self.dataset = SingleDataset(opt, training)
            self.datasetA = self.dataset.generate()
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.realA = self.dataA_iter.get_next()

        # placeholders for fake images (filled externally, e.g. from a pool)
        fake_shape = [self.opt.batch_size, self.opt.crop_size,
                      self.opt.crop_size, self.opt.channels]
        self.fakeA = tf.placeholder(tf.float32, shape=fake_shape)
        self.fakeB = tf.placeholder(tf.float32, shape=fake_shape)
# --- Example #4 (snippet separator; was the bare lines "예제 #4" / "0",
# which are not valid runnable Python) ---
def CreateDataset(opt):
    """
    Factory for datasets selected by ``opt.dataset_mode``.

    Creates the dataset named by ``opt.dataset_mode`` ('aligned',
    'unaligned' or 'single'), initializes it with ``opt`` and returns it.

    Raises:
        ValueError: if ``opt.dataset_mode`` names no known dataset.
    """
    if opt.dataset_mode == 'aligned':
        dataset = AlignedDataset()
    elif opt.dataset_mode == 'unaligned':
        dataset = UnalignedDataset()
    elif opt.dataset_mode == 'single':
        dataset = SingleDataset()
    else:
        raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)

    print("dataset [%s] was created" % (dataset.name()))
    # Initialize every dataset uniformly with opt, matching the sibling
    # CreateDataset factory in this file. The original only initialized the
    # 'single' dataset (leaving 'unaligned' without opt entirely) and passed
    # opt to AlignedDataset's constructor instead.
    # NOTE(review): confirm AlignedDataset's constructor takes no arguments,
    # as in the sibling factory.
    dataset.initialize(opt)
    return dataset
# --- Example #5 (snippet separator; was the bare lines "예제 #5" / "0",
# which are not valid runnable Python) ---
    def generate_dataset(self):
        """
        Add ops for dataset loaders to graph.

        Returns the initializable iterator(s) together with their
        ``get_next`` tensors: ``(dataA_iter, dataB_iter, nextA, nextB)``
        while training, or ``(dataA_iter, nextA)`` at test time.
        """
        if not self.training:  # only need shadow dataset for testing
            dataset = SingleDataset(self.opt, self.training)
            dataA_iter = dataset.generate().make_initializable_iterator()
            return dataA_iter, dataA_iter.get_next()

        dataset = UnpairedDataset(self.opt, self.training)
        datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',
                                              cacheB='./dataB.tfcache')
        dataA_iter = datasetA.make_initializable_iterator()
        dataB_iter = datasetB.make_initializable_iterator()
        return (dataA_iter, dataB_iter,
                dataA_iter.get_next(), dataB_iter.get_next())
class EnlightenGANModel(BaseModel):
    """
    Implementation of EnlightenGAN model for low-light image enhancement with
    unpaired data.

    Paper: https://arxiv.org/pdf/1906.06972.pdf
    """
    def __init__(self, opt, training):
        """
        Create dataset-loading ops.

        Args:
            opt: parsed option namespace (crop/patch sizes, loss flags, ...).
            training: True builds the unpaired low/normal training pipeline;
                False builds a single-domain (low-light only) test pipeline.
        """
        BaseModel.__init__(self, opt, training)

        # create dataset loaders
        if training:
            self.dataset = UnpairedDataset(opt, training)
            self.datasetA, self.datasetB = self.dataset.generate(
                cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.dataB_iter = self.datasetB.make_initializable_iterator()
            # domain A = low-light inputs, domain B = normal-light targets
            self.low = self.dataA_iter.get_next()
            self.normal = self.dataB_iter.get_next()
        else:
            self.dataset = SingleDataset(opt, training)
            self.datasetA = self.dataset.generate()
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.low = self.dataA_iter.get_next()

    def build(self):
        """
        Build TensorFlow graph for EnlightenGAN model.

        Returns ``(enhanced, optimizers, Gen_loss, D_loss, D_P_loss)`` while
        training, or just the ``enhanced`` tensor at test time.
        """
        # add ops for Generator (low light -> normal light) to graph
        self.G = Generator(channels=self.opt.channels,
                           netG=self.opt.netG,
                           ngf=self.opt.ngf,
                           norm_type=self.opt.layer_norm_type,
                           init_type=self.opt.weight_init_type,
                           init_gain=self.opt.weight_init_gain,
                           dropout=self.opt.dropout,
                           self_attention=self.opt.self_attention,
                           times_residual=self.opt.times_residual,
                           skip=self.opt.skip,
                           training=self.training,
                           name='G')

        if self.training:
            # add ops for discriminator to graph
            self.D = Discriminator(channels=self.opt.channels,
                                   netD=self.opt.netD,
                                   n_layers=self.opt.n_layers,
                                   ndf=self.opt.ndf,
                                   norm_type=self.opt.layer_norm_type,
                                   init_type=self.opt.weight_init_type,
                                   init_gain=self.opt.weight_init_gain,
                                   training=self.training,
                                   gan_mode=self.opt.gan_mode,
                                   name='D')

            # add ops for patch discriminator to graph if necessary
            if self.opt.patchD:
                self.D_P = Discriminator(channels=self.opt.channels,
                                         netD=self.opt.netD,
                                         n_layers=self.opt.n_layers_patch,
                                         ndf=self.opt.ndf,
                                         norm_type=self.opt.layer_norm_type,
                                         init_type=self.opt.weight_init_type,
                                         init_gain=self.opt.weight_init_gain,
                                         training=self.training,
                                         gan_mode=self.opt.gan_mode,
                                         name='D_P')

            # build feature extractor (frozen VGG16) if necessary
            if self.opt.vgg:
                self.vgg16 = tf.keras.applications.VGG16(include_top=False,
                                                         input_shape=(None,
                                                                      None, 3))
                self.vgg16.trainable = False

            # inverted grayscale of the input mapped from [-1, 1] to [0, 1];
            # fed to the generator alongside the image (presumably as an
            # illumination/attention map -- see the EnlightenGAN paper)
            gray = 1. - tf.image.rgb_to_grayscale((self.low + 1.) / 2.)

            if self.opt.skip > 0:
                # with skip connections the generator also returns its latent
                enhanced, latent_enhanced = self.G(self.low, gray)
            else:
                enhanced = self.G(self.low, gray)

            # single random patch per image for the patch discriminator
            if self.opt.patchD:
                height = self.opt.crop_size
                width = self.opt.crop_size
                height_offset = tf.random.uniform([1],
                                                  maxval=height -
                                                  self.opt.patch_size - 1,
                                                  dtype=tf.int32)
                width_offset = tf.random.uniform([1],
                                                 maxval=width -
                                                 self.opt.patch_size - 1,
                                                 dtype=tf.int32)

                low_patch = ops.crop(self.low, height_offset[0],
                                     width_offset[0], self.opt.patch_size)
                normal_patch = ops.crop(self.normal, height_offset[0],
                                        width_offset[0], self.opt.patch_size)
                enhanced_patch = ops.crop(enhanced, height_offset[0],
                                          width_offset[0], self.opt.patch_size)
            else:
                low_patch = None
                normal_patch = None
                enhanced_patch = None

            # additional batch of patchD_3 random patches, cropped at the
            # same offsets across low/normal/enhanced
            if self.opt.patchD_3 > 0:
                height = self.opt.crop_size
                width = self.opt.crop_size
                height_offset = tf.random.uniform([self.opt.patchD_3],
                                                  maxval=height -
                                                  self.opt.patch_size - 1,
                                                  dtype=tf.int32)
                width_offset = tf.random.uniform([self.opt.patchD_3],
                                                 maxval=width -
                                                 self.opt.patch_size - 1,
                                                 dtype=tf.int32)

                low_patches = tf.map_fn(lambda x: ops.crop(
                    self.low, x[0], x[1], self.opt.patch_size),
                                        (height_offset, width_offset),
                                        dtype=tf.float32)
                normal_patches = tf.map_fn(lambda x: ops.crop(
                    self.normal, x[0], x[1], self.opt.patch_size),
                                           (height_offset, width_offset),
                                           dtype=tf.float32)
                enhanced_patches = tf.map_fn(lambda x: ops.crop(
                    enhanced, x[0], x[1], self.opt.patch_size),
                                             (height_offset, width_offset),
                                             dtype=tf.float32)
            else:
                low_patches = None
                normal_patches = None
                enhanced_patches = None

            tf.summary.image('low', batch_convert_2_int(self.low))
            tf.summary.image('normal', batch_convert_2_int(self.normal))
            tf.summary.image('enhanced', batch_convert_2_int(enhanced))

            # add loss ops to graph
            Gen_loss, D_loss, D_P_loss = self.__loss(
                self.low, self.normal, enhanced, low_patch, normal_patch,
                enhanced_patch, low_patches, normal_patches, enhanced_patches)

            # add optimizer ops to graph
            optimizers = self.__optimizers(Gen_loss, D_loss, D_P_loss)

            if D_P_loss is None:  # create dummy value to avoid error
                D_P_loss = tf.constant(0)

            return enhanced, optimizers, Gen_loss, D_loss, D_P_loss
        else:
            # NOTE(review): the test path calls G without the gray channel
            # used at training time -- confirm the Generator accepts this
            enhanced = self.G(self.low)[0] if self.opt.skip > 0 else self.G(
                self.low)
            return enhanced

    def __loss(self, low, normal, enhanced, low_patch, normal_patch,
               enhanced_patch, low_patches, normal_patches, enhanced_patches):
        """
        Compute losses for generator and discriminators.

        Returns ``(Gen_loss, D_loss, D_P_loss)``; ``D_P_loss`` is None when
        no patch discriminator is configured.
        """
        # with hybrid loss the patch losses fall back to plain (non-relativistic)
        # GAN loss while the global discriminator still uses RaGAN
        use_ragan = not self.opt.hybrid_loss

        # compute generator loss
        Gen_loss = self.__G_loss(self.D, normal, enhanced, use_ragan=True)

        if self.opt.patchD:
            Gen_loss += self.__G_loss(self.D_P,
                                      normal_patch,
                                      enhanced_patch,
                                      use_ragan=use_ragan)

            if self.opt.patchD_3 > 0:
                Gen_loss += self.__G_loss(self.D_P,
                                          normal_patches,
                                          enhanced_patches,
                                          use_ragan=use_ragan)

        if self.opt.vgg:
            Gen_loss += self.__perceptual_loss(low, enhanced, low_patch,
                                               enhanced_patch, low_patches,
                                               enhanced_patches)

        # compute global discriminator loss
        D_loss = self.__D_loss(self.D, normal, enhanced, use_ragan=True)

        # compute local discriminator loss if necessary
        D_P_loss = None
        if self.opt.patchD:
            D_P_loss = self.__D_loss(self.D_P,
                                     normal_patch,
                                     enhanced_patch,
                                     use_ragan=use_ragan)

            if self.opt.patchD_3 > 0:
                D_P_loss += self.__D_loss(self.D_P,
                                          normal_patches,
                                          enhanced_patches,
                                          use_ragan=use_ragan)

        return Gen_loss, D_loss, D_P_loss

    def __D_loss(self, D, normal, enhanced, use_ragan=False, eps=1e-12):
        """
        Compute the discriminator loss.

        If LSGAN is used: (MSE Loss)
            L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
        Otherwise: (NLL Loss)
            L_disc = -0.5 * [Expectation of log(D(B)) + Expectation of log(1 - D(G(A)))]

        Raises:
            ValueError: if ``opt.gan_mode`` is not 'lsgan' or 'vanilla' (and
                RaGAN is not in effect).
        """
        if self.opt.use_ragan and use_ragan:
            # relativistic average GAN loss (RaGAN)
            loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(normal) - tf.reduce_mean(D(enhanced)), 1.0)) + \
                          tf.reduce_mean(tf.square(D(enhanced) - tf.reduce_mean(D(normal)))))
        elif self.opt.gan_mode == 'lsgan':
            loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(normal), 1.0)) + \
                          tf.reduce_mean(tf.square(D(enhanced))))
        elif self.opt.gan_mode == 'vanilla':
            loss = -0.5 * (tf.reduce_mean(tf.log(D(normal) + eps)) + \
                           tf.reduce_mean(tf.log(1 - D(enhanced) + eps)))
        else:
            # previously an unrecognized mode fell through and raised an
            # obscure NameError on the unbound `loss`; fail loudly instead
            raise ValueError("GAN mode [%s] not recognized." %
                             self.opt.gan_mode)

        return loss

    def __G_loss(self, D, normal, enhanced, use_ragan=False, eps=1e-12):
        """
        Compute the generator loss.

        If LSGAN is used: (MSE Loss)
            L_gen = Expectation of (D(G(A)) - 1)^2
        Otherwise: (NLL Loss)
            L_gen = Expectation of -log(D(G(A)))

        Raises:
            ValueError: if ``opt.gan_mode`` is not 'lsgan' or 'vanilla' (and
                RaGAN is not in effect).
        """
        if self.opt.use_ragan and use_ragan:
            # relativistic average GAN loss (RaGAN)
            loss = 0.5 * (tf.reduce_mean(tf.square(D(normal) - tf.reduce_mean(D(enhanced)))) + \
                          tf.reduce_mean(tf.squared_difference(D(enhanced) - tf.reduce_mean(D(normal)), 1.0)))
        elif self.opt.gan_mode == 'lsgan':
            loss = tf.reduce_mean(tf.squared_difference(D(enhanced), 1.0))
        elif self.opt.gan_mode == 'vanilla':
            loss = -1 * tf.reduce_mean(tf.log(D(enhanced) + eps))
        else:
            # previously an unrecognized mode fell through and raised an
            # obscure NameError on the unbound `loss`; fail loudly instead
            raise ValueError("GAN mode [%s] not recognized." %
                             self.opt.gan_mode)

        return loss

    def __perceptual_loss(self,
                          low,
                          enhanced,
                          low_patch=None,
                          enhanced_patch=None,
                          low_patches=None,
                          enhanced_patches=None):
        """
        Compute the self feature preserving loss on the low-light and enhanced image.

        MSE between VGG16 features of the input and the enhanced output
        (optionally instance-normalized), plus the same terms on patches
        when patch options are enabled.
        """
        features_low = self.__vgg16_features(low)
        features_normal = self.__vgg16_features(enhanced)

        if self.opt.patch_vgg:
            features_low_patch = self.__vgg16_features(low_patch)
            features_normal_patch = self.__vgg16_features(enhanced_patch)

        if self.opt.patchD_3 > 0:
            features_low_patches = self.__vgg16_features(low_patches)
            features_normal_patches = self.__vgg16_features(enhanced_patches)

        if self.opt.no_vgg_instance:
            # raw feature-space MSE
            loss = tf.reduce_mean(
                tf.squared_difference(features_low, features_normal))

            if self.opt.patch_vgg:
                loss += tf.reduce_mean(
                    tf.squared_difference(features_low_patch,
                                          features_normal_patch))

            if self.opt.patchD_3 > 0:
                loss += tf.reduce_mean(
                    tf.squared_difference(features_low_patches,
                                          features_normal_patches))
        else:
            # instance-normalize the feature maps before comparing
            loss = tf.reduce_mean(
                tf.squared_difference(
                    instance_norm(features_low, name='low_weights'),
                    instance_norm(features_normal, name='normal_weights')))

            if self.opt.patch_vgg:
                loss += tf.reduce_mean(
                    tf.squared_difference(
                        instance_norm(features_low_patch,
                                      name='low_patch_weights'),
                        instance_norm(features_normal_patch,
                                      name='normal_patch_weights')))

            if self.opt.patchD_3 > 0:
                loss += tf.reduce_mean(
                    tf.squared_difference(
                        instance_norm(features_low_patches,
                                      name='low_patches_weights'),
                        instance_norm(features_normal_patches,
                                      name='normal_patches_weights')))

        return loss

    def __vgg16_features(self, image):
        """
        Extract features from image using VGG16 model.

        The input (in [-1, 1]) is rescaled to [0, 255] and preprocessed for
        VGG16; layers are applied up to and including ``opt.vgg_choose``.
        """
        vgg16_in = tf.keras.applications.vgg16.preprocess_input(
            (image + 1) * 127.5)
        x = vgg16_in

        for i in range(len(self.vgg16.layers)):
            x = self.vgg16.layers[i](x)

            if self.vgg16.layers[i].name == self.opt.vgg_choose:
                break

        vgg16_features = x

        return vgg16_features

    def __optimizers(self, Gen_loss, D_loss, D_P_loss=None):
        """
        Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
        https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py

        Returns a single no-op that depends on all optimizer steps.
        """
        def make_optimizer(loss, variables, name='Adam'):
            """ Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
                and a linearly decaying rate that goes to zero over the next 100k steps
            """
            # each optimizer keeps its own global_step counter
            global_step = tf.Variable(0, trainable=False, name='global_step')
            starter_learning_rate = self.opt.lr
            end_learning_rate = 0.0
            start_decay_step = self.opt.niter
            decay_steps = self.opt.niter_decay
            beta1 = self.opt.beta1
            # constant LR for `niter` steps, then linear decay to zero over
            # `niter_decay` steps
            learning_rate = (tf.where(
                tf.greater_equal(global_step, start_decay_step),
                tf.train.polynomial_decay(starter_learning_rate,
                                          global_step - start_decay_step,
                                          decay_steps,
                                          end_learning_rate,
                                          power=1.0), starter_learning_rate))

            learning_step = (tf.train.AdamOptimizer(
                learning_rate, beta1=beta1,
                name=name).minimize(loss,
                                    global_step=global_step,
                                    var_list=variables))

            return learning_step

        Gen_optimizer = make_optimizer(Gen_loss,
                                       self.G.variables,
                                       name='Adam_Gen')
        D_optimizer = make_optimizer(D_loss, self.D.variables, name='Adam_D')

        optimizers = [Gen_optimizer, D_optimizer]

        if D_P_loss is not None:
            D_P_optimizer = make_optimizer(D_P_loss,
                                           self.D_P.variables,
                                           name='Adam_D_P')
            optimizers.append(D_P_optimizer)

        with tf.control_dependencies(optimizers):
            return tf.no_op(name='optimizers')
# --- Example #7 (snippet separator; was the bare lines "예제 #7" / "0",
# which are not valid runnable Python) ---
class CycleGANModel(BaseModel):
    """
    Implementation of CycleGAN model for image-to-image translation of unpaired
    data.

    Paper: https://arxiv.org/pdf/1703.10593.pdf
    """
    def __init__(self, opt, training):
        """
        Create dataset-loading ops and the fake-image feed placeholders.

        Args:
            opt: parsed option namespace (batch/crop sizes, loss weights, ...).
            training: True builds the unpaired A/B training pipeline;
                False builds a single-domain (A only) test pipeline.
        """
        BaseModel.__init__(self, opt, training)

        # create dataset loaders
        if training:
            self.dataset = UnpairedDataset(opt, training)
            self.datasetA, self.datasetB = self.dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.dataB_iter = self.datasetB.make_initializable_iterator()
            self.realA = self.dataA_iter.get_next()
            self.realB = self.dataB_iter.get_next()
        else:
            self.dataset = SingleDataset(opt, training)
            self.datasetA = self.dataset.generate()
            self.dataA_iter = self.datasetA.make_initializable_iterator()
            self.realA = self.dataA_iter.get_next()

        # create placeholders for fake images (fed externally, e.g. from an
        # image pool, when computing the discriminator losses)
        self.fakeA = tf.placeholder(tf.float32,
            shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.fakeB = tf.placeholder(tf.float32,
            shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])

    def build(self):
        """
        Build TensorFlow graph for CycleGAN model.

        Returns ``(fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss)``
        while training, or just ``fakeB`` at test time.
        """
        # add ops for generator (A->B) to graph
        self.G = Generator(channels=self.opt.channels, netG=self.opt.netG, ngf=self.opt.ngf,
                           norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                           init_gain=self.opt.weight_init_gain, dropout=self.opt.dropout,
                           training=self.training, name='G')

        if self.training:
            # add ops for other generator (B->A) and discriminators to graph
            self.F = Generator(channels=self.opt.channels, netG=self.opt.netG, ngf=self.opt.ngf,
                               norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                               init_gain=self.opt.weight_init_gain, dropout=self.opt.dropout,
                               training=self.training, name='F')
            self.D_A = Discriminator(channels=self.opt.channels, netD=self.opt.netD, n_layers=self.opt.n_layers,
                                     ndf=self.opt.ndf, norm_type=self.opt.layer_norm_type,
                                     init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                                     training=self.training, gan_mode=self.opt.gan_mode, name='D_A')
            self.D_B = Discriminator(channels=self.opt.channels, netD=self.opt.netD, n_layers=self.opt.n_layers,
                                     ndf=self.opt.ndf, norm_type=self.opt.layer_norm_type,
                                     init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                                     training=self.training, gan_mode=self.opt.gan_mode, name='D_B')

            # generate fake images
            fakeA = self.F(self.realB)
            fakeB = self.G(self.realA)

            # generate reconstructed images (full cycles A->B->A and B->A->B)
            reconstructedA = self.F(fakeB)
            reconstructedB = self.G(fakeA)

            # generate identity mapping images (G on B, F on A)
            identA = self.G(self.realB)
            identB = self.F(self.realA)

            tf.summary.image('A/original', batch_convert_2_int(self.realA))
            tf.summary.image('B/original', batch_convert_2_int(self.realB))
            tf.summary.image('A/generated', batch_convert_2_int(fakeA))
            tf.summary.image('B/generated', batch_convert_2_int(fakeB))
            tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))
            tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))

            # add loss ops to graph
            Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,
                                                       reconstructedB, identA, identB)

            # add optimizer ops to graph
            optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)

            return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
        else:  # only need generator from A->B during testing
            fakeB = self.G(self.realA)
            return fakeB

    def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):
        """
        Compute the losses for the generators and discriminators.

        Returns ``(Gen_loss, D_A_loss, D_B_loss)``.
        """
        # compute the generators loss
        G_loss = self.__G_loss(self.D_B, fakeB)
        F_loss = self.__G_loss(self.D_A, fakeA)
        cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
        ident_loss = self.__identity_loss(identA, identB)
        Gen_loss = G_loss + F_loss + cc_loss + ident_loss

        # Compute the disciminators loss. Use fake images from image pool
        # (fed through the self.fakeA/self.fakeB placeholders) to improve
        # training stability
        D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
        D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)

        return Gen_loss, D_A_loss, D_B_loss

    def __D_loss(self, D, real, fake, eps=1e-12):
        """
        Compute the discriminator loss.

        If LSGAN is used: (MSE Loss)
            L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
        Otherwise: (NLL Loss)
            L_disc = -0.5 * [Expectation of log(D(B)) + Expectation of log(1 - D(G(A)))]

        Raises:
            ValueError: if ``opt.gan_mode`` is not 'lsgan' or 'vanilla'.
        """
        if self.opt.gan_mode == 'lsgan':
            loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \
                          tf.reduce_mean(tf.square(D(fake))))
        elif self.opt.gan_mode == 'vanilla':
            loss = -0.5 * (tf.reduce_mean(tf.log(D(real) + eps)) + \
                           tf.reduce_mean(tf.log(1 - D(fake) + eps)))
        else:
            # previously an unrecognized mode fell through and raised an
            # obscure NameError on the unbound `loss`; fail loudly instead
            raise ValueError("GAN mode [%s] not recognized." %
                             self.opt.gan_mode)

        return loss

    def __G_loss(self, D, fake, eps=1e-12):
        """
        Compute the generator loss.

        If LSGAN is used: (MSE Loss)
            L_gen = Expectation of (D(G(A)) - 1)^2
        Otherwise: (NLL Loss)
            L_gen = Expectation of -log(D(G(A)))

        Raises:
            ValueError: if ``opt.gan_mode`` is not 'lsgan' or 'vanilla'.
        """
        if self.opt.gan_mode == 'lsgan':
            loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
        elif self.opt.gan_mode == 'vanilla':
            loss = -1 * tf.reduce_mean(tf.log(D(fake) + eps))
        else:
            # previously an unrecognized mode fell through and raised an
            # obscure NameError on the unbound `loss`; fail loudly instead
            raise ValueError("GAN mode [%s] not recognized." %
                             self.opt.gan_mode)

        return loss

    def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
        """
        Compute the cycle consistenty loss.

        L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
                lamb * [Expectation of L1_norm(G(F(B)) - B)]
        """
        loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
               self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))

        return loss

    def __identity_loss(self, identA, identB):
        """
        Compute the identity loss.

        L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
                             lamB * [Expectation of L1_norm(G(B) - B)]]
        """
        # identB = F(realA) is compared against realA; identA = G(realB)
        # against realB
        loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
                                        self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))

        return loss

    def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
        """
        Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
        https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py

        Returns a single no-op that depends on all three optimizer steps.
        """
        def make_optimizer(loss, variables, name='Adam'):
            """ Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
                and a linearly decaying rate that goes to zero over the next 100k steps
            """
            # each optimizer keeps its own global_step counter
            global_step = tf.Variable(0, trainable=False, name='global_step')
            starter_learning_rate = self.opt.lr
            end_learning_rate = 0.0
            start_decay_step = self.opt.niter
            decay_steps = self.opt.niter_decay
            beta1 = self.opt.beta1
            # constant LR for `niter` steps, then linear decay to zero over
            # `niter_decay` steps
            learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
                                      tf.train.polynomial_decay(starter_learning_rate,
                                                                global_step-start_decay_step,
                                                                decay_steps, end_learning_rate,
                                                                power=1.0),
                                      starter_learning_rate))

            learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
                                .minimize(loss, global_step=global_step, var_list=variables))

            return learning_step

        # both generators share one optimizer step
        Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')
        D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')
        D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')

        with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):
            return tf.no_op(name='optimizers')