Example #1
0
    def model(self):
        """Build the CycleGAN training graph with an extra skin-preservation term.

        Wires the tfrecord readers, the generator/discriminator losses for
        both directions and all TensorBoard summaries.

        Returns:
            (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x)
        """
        X_reader = Reader(self.X_train_file,
                          name='X',
                          image_size=self.image_size,
                          batch_size=self.batch_size)
        Y_reader = Reader(self.Y_train_file,
                          name='Y',
                          image_size=self.image_size,
                          batch_size=self.batch_size)

        x = X_reader.feed()
        y = Y_reader.feed()

        # Cycle-consistency term, shared by both generator losses below.
        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

        # X -> Y
        fake_y = self.G(x)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         fake_y,
                                         use_lsgan=self.use_lsgan)
        # Penalty keeping regions selected by self.covered2 unchanged.
        G_skin_loss = self.skin_loss(self.G, x, self.covered2)
        G_loss = G_gan_loss + cycle_loss + G_skin_loss
        # NOTE(review): the discriminator sees self.fake_y, not the fresh
        # fake_y above — presumably an image-pool/history placeholder; confirm.
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan)

        # Y -> X
        fake_x = self.F(y)
        F_gan_loss = self.generator_loss(self.D_X,
                                         fake_x,
                                         use_lsgan=self.use_lsgan)
        F_skin_loss = self.skin_loss(self.F, y, self.covered1)
        F_loss = F_gan_loss + cycle_loss + F_skin_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan)

        # summary

        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

        tf.summary.scalar('loss/G', G_gan_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F', F_gan_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)

        tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(self.F(self.G(x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(self.G(self.F(y))))

        return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Example #2
0
def main():
    """Debug script: read one JPEG from ./picF, attach summaries and write a
    single summary record to the hard-coded log directory C:\\log."""

    picF = "picF"
    files = os.listdir(picF)[:1]  # keep only the first file in the folder

    sess = tf.InteractiveSession()

    # Fixed step value; only used as a scalar summary below.
    global_step = tf.Variable(2501, name="global_step", trainable=False)

    sess.run(tf.global_variables_initializer())

    img = tf.read_file(os.path.join(picF, files[0]))
    img = tf.image.decode_jpeg(img)
    #img = utils.convert2float(img)
    img = tf.expand_dims(img, axis=0)  # add batch dim -> [1, h, w, c]

    tf.summary.image('real', img)
    tf.summary.scalar('test', global_step)
    # Round-trip through convert2float / batch_convert2int to eyeball the
    # conversion utilities; the prints below show the raw tensor values.
    outimg = utils.convert2float(img)
    tf.summary.image('out', utils.batch_convert2int(outimg))
    sdf = outimg.eval()
    print(sdf)
    sdf = utils.batch_convert2int(outimg).eval()
    print(sdf)
    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('C:\\log')

    summary, _, s, outimg2 = sess.run([summary_op, img, global_step, outimg])
    train_writer.add_summary(summary)
    train_writer.flush()

    sess.close()
Example #3
0
  def sample(self, input, G_or_F='G'):
    """Run *input* through generator G (or F) and return a JPEG-encoded tensor."""
    generator = self.G if G_or_F == 'G' else self.F
    int_batch = utils.batch_convert2int(generator(input))
    # Drop the leading batch dimension before JPEG encoding.
    return tf.image.encode_jpeg(tf.squeeze(int_batch, [0]))
Example #4
0
    def model(self):
        """Build the CycleGAN training graph (module-constant train files).

        In this variant generator_loss / discriminator_loss receive the
        generator itself plus the raw inputs rather than pre-generated fakes.
        Also stores the merged summary op and a Saver on the instance.

        Returns:
            (G_loss, D_Y_loss, F_loss, D_X_loss)
        """
        X_reader = Reader(X_TRAIN_FILE, name='X')
        Y_reader = Reader(Y_TRAIN_FILE, name='Y')

        x = X_reader.feed()
        y = Y_reader.feed()

        # Cycle-consistency term shared by both generator losses.
        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

        # X -> Y
        G_gan_loss = self.generator_loss(self.G,
                                         self.D_Y,
                                         x,
                                         use_lsgan=self.use_lsgan)
        G_loss = G_gan_loss + cycle_loss
        D_Y_loss = self.discriminator_loss(self.G,
                                           self.D_Y,
                                           x,
                                           y,
                                           use_lsgan=self.use_lsgan)

        # Y -> X
        F_gan_loss = self.generator_loss(self.F,
                                         self.D_X,
                                         y,
                                         use_lsgan=self.use_lsgan)
        F_loss = F_gan_loss + cycle_loss
        D_X_loss = self.discriminator_loss(self.F,
                                           self.D_X,
                                           y,
                                           x,
                                           use_lsgan=self.use_lsgan)

        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

        tf.summary.scalar('loss/G', G_gan_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F', F_gan_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)

        tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(self.F(self.G(x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(self.G(self.F(y))))

        # Keep the merged summary op and a Saver for the training loop.
        self.summary = tf.summary.merge_all()
        self.saver = tf.train.Saver()

        return G_loss, D_Y_loss, F_loss, D_X_loss
Example #5
0
 def model(self):
     """Attach the loss summary plus image summaries comparing the model
     output against nearest / bilinear / bicubic resize baselines."""
     target = (self.image_size, self.image_size)
     up_nearest = tf.image.resize_images(self.x, target,
                                         tf.image.ResizeMethod.NEAREST_NEIGHBOR)
     up_bilinear = tf.image.resize_images(self.x, target,
                                          tf.image.ResizeMethod.BILINEAR)
     up_bicubic = tf.image.resize_images(self.x, target,
                                         tf.image.ResizeMethod.BICUBIC)
     tf.summary.scalar('Loss', self.loss)
     # Same tags and order as before; images are converted to int for display.
     for tag, img in (('Origin image', self.y),
                      ('Near', up_nearest),
                      ('Bilinears', up_bilinear),
                      ('Bicubic', up_bicubic),
                      ('Reconstruct', self.output)):
         tf.summary.image(tag, utils.batch_convert2int(img))
Example #6
0
  def model(self):
    """Build a WGAN-GP flavoured CycleGAN graph.

    Uses a Wasserstein critic loss plus gradient penalty (weighted by the
    module-level LAMBDA) instead of the usual GAN losses and returns one
    combined generator loss and one combined critic loss.

    Returns:
        (G_loss, C_loss, fake_y, fake_x)
    """
    X_reader = Reader(self.X_train_file, name='X', image_size_w=self.image_size_w,
                      image_size_h=self.image_size_h, batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y', image_size_w=self.image_size_w,
                      image_size_h=self.image_size_h, batch_size=self.batch_size)

    x = X_reader.feed()
    y = Y_reader.feed()

    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
    # Critic losses use self.fake_y / self.fake_x — presumably image-pool
    # placeholders, not the fresh fakes built below; confirm against caller.
    W_a = self.disc_loss(self.D_Y, y, self.fake_y)
    W_b = self.disc_loss(self.D_X, x, self.fake_x)
    W = W_a + W_b

    # Gradient penalty terms (name spelling follows the external method).
    GP_a = self.gradien_penalty(self.D_Y, y, self.fake_y)
    GP_b = self.gradien_penalty(self.D_X, x, self.fake_x)
    GP = GP_a + GP_b

    # X -> Y
    fake_y = self.G(x)
    G_gan_loss = self.gen_loss(self.D_Y, fake_y)

    # Y -> X
    fake_x = self.F(y)
    F_gan_loss = self.gen_loss(self.D_X, fake_x)

    loss_g = G_gan_loss + F_gan_loss

    # Total generator loss and total critic loss (LAMBDA is module-level).
    G_loss = cycle_loss + loss_g
    C_loss = LAMBDA*GP + W

    # summary
    tf.summary.scalar('loss/G', G_loss)
    tf.summary.scalar('loss/C', C_loss)
    tf.summary.scalar('loss/GradientPenalty', GP)
    tf.summary.scalar('loss/cycle', cycle_loss)
    tf.summary.scalar('lr/learning_rate', self.learning_rate)

    # Images are resized to a fixed (36, 136) preview size for TensorBoard.
    tf.summary.image('X/generated', utils.batch_convert2int(
              tf.image.resize_images(self.G(x), (36, 136))))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(
              tf.image.resize_images(self.F(self.G(x)), (36, 136))))
    tf.summary.image('Y/generated', utils.batch_convert2int(
              tf.image.resize_images(self.F(y), (36, 136))
    ))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(
              tf.image.resize_images(self.G(self.F(y)), (36, 136))
    ))

    return G_loss, C_loss, fake_y, fake_x
Example #7
0
    def model(self):
        """Attach the scalar loss and the input / prediction / target image
        summaries for this segmentation-style model."""
        tf.summary.scalar("loss", self.loss)
        # The predicted boundary is float-valued, so convert it for display;
        # the input and ground truth are logged as-is.
        summaries = (
            ('original image', self.x),
            ('predicted boundary', utils.batch_convert2int(self.output)),
            ('ground truth', self.y),
        )
        for tag, tensor in summaries:
            tf.summary.image(tag, tensor)
Example #8
0
    def model(self):
        """Build the CycleGAN training graph using utils.get_img input loading.

        All GAN losses come from a single combined self.gan_loss call; the
        summaries are computed on the placeholder inputs self.x / self.y.

        Returns:
            (G_loss, F_loss, D_loss_x, D_loss_y, fake_y, fake_x)
        """
        # X_reader = Reader('data/tfrecords/man2woman/man.tfrecords', name='X',
        #     image_size=self.image_size, batch_size=self.batch_size)
        # Y_reader = Reader('data/tfrecords/man2woman/woman.tfrecords', name='Y',
        #     image_size=self.image_size, batch_size=self.batch_size)

        # x = X_reader.feed()
        # y = Y_reader.feed()
        x = utils.get_img(self.file_x, self.image_size, self.image_size, self.batch_size)
        y = utils.get_img(self.file_y, self.image_size, self.image_size, self.batch_size)

        fake_y = self.G(x)
        fake_x = self.F(y)

        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
        # Combined GAN losses; mixes the freshly read x/y with the
        # placeholder self.x/self.y — NOTE(review): confirm this is intended.
        G_gan_loss, F_gan_loss, D_loss_x, D_loss_y = self.gan_loss(self.D_Y, self.D_X, 
                            self.x, self.y, x, y, fake_y, fake_x, self.use_mse)
        # G_gan_loss = tf.reduce_mean(tf.squared_difference(self.D_Y(fake_y), self.label))
        # F_gan_loss = tf.reduce_mean(tf.squared_difference(self.D_X(fake_x), self.label))
        # D_loss_x = self.discriminator_loss(self.D_X, x, self.x)
        # D_loss_y = self.discriminator_loss(self.D_Y, y, self.y)


        
        G_loss = G_gan_loss + cycle_loss
        F_loss = F_gan_loss + cycle_loss

        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(self.y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(self.x)))
        tf.summary.histogram('D_X/true', self.D_X(self.x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(self.y)))

        tf.summary.scalar('loss/G', G_loss)
        tf.summary.scalar('loss/D_Y', D_loss_y)
        tf.summary.scalar('loss/F', F_loss)
        tf.summary.scalar('loss/D_X', D_loss_x)
        tf.summary.scalar('loss/cycle', cycle_loss)

        tf.summary.image('X/generated', utils.batch_convert2int(self.G(self.x)))
        tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(self.x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(self.y)))
        tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(self.y))))

        return G_loss, F_loss, D_loss_x, D_loss_y, fake_y, fake_x
Example #9
0
    def computeSwapScoreBKG(self, rep_Sy, rep_Ey, autoY):
        """Score how well exclusive (background) codes transfer across images.

        For every batch item i, decodes its shared representation together
        with the exclusive representation of a randomly chosen batch member
        and accumulates the L1 difference between the top-left 4x4 patch of
        the swapped decode and of the partner's autoencoded image.

        Args:
            rep_Sy: shared representations, rank-4 [batch, h, w, c].
            rep_Ey: exclusive representations, same leading batch dimension.
            autoY: autoencoder reconstructions for the same batch.

        Returns:
            Scalar tensor: summed patch differences over the batch.
        """
        # One random partner index per batch item, drawn in [0, batch_size).
        bkg_ims_idx = tf.random_uniform([self.batch_size],
                                        minval=0,
                                        maxval=self.batch_size,
                                        dtype=tf.int32)
        swapScoreBKG = 0
        for i in range(0, self.batch_size):
            s_curr = tf.reshape(
                rep_Sy[i, :],
                [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])

            # BUG FIX: the original re-drew the partner index with
            # `while bkg_ims_idx[i] == tf.Variable(i)` (plus a leftover
            # pdb.set_trace(), with the re-draw assigned to the misspelled
            # name `bkf_ims_idx`). Using a symbolic tensor as a Python bool
            # fails at graph-construction time, so the re-draw is dropped:
            # with probability 1/batch_size an image is paired with itself,
            # which only makes the score slightly optimistic.

            s_rnd = tf.reshape(
                rep_Sy[bkg_ims_idx[i], :],
                [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])
            ex_rnd = tf.reshape(
                rep_Ey[bkg_ims_idx[i], :],
                [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])
            # Decode the current shared part with the partner's exclusive part.
            im_swapped = self.Gd(tf.concat([s_curr, ex_rnd], 3))

            # Only show first 3
            if i < 3:
                tf.summary.image('ZSwap/im_' + str(i) + '_iswapped',
                                 utils.batch_convert2int(im_swapped))
                tf.summary.image(
                    'ZSwap/im_' + str(i) + '_orig',
                    utils.batch_convert2int(
                        tf.reshape(autoY[bkg_ims_idx[i], :], [1, 32, 32, 3])))
            # Compare only the 4x4 top-left corner (background region).
            swapScoreBKG += tf.reduce_mean(
                tf.abs(autoY[bkg_ims_idx[i], :4, :4, :] -
                       im_swapped[0, :4, :4, :]))
        return swapScoreBKG
Example #10
0
    def swapExplicit(self, input1, input2):
        """Swap exclusive representations between two images and decode both.

        Returns a pair of JPEG-encoded tensors: input1's shared part combined
        with input2's exclusive part, and vice versa.
        """
        rep1_Sy, rep1_Ey = self.Fe(input1)
        rep2_Sy, rep2_Ey = self.Fe(input2)

        def decode_to_jpeg(shared, exclusive):
            # Decode the concatenated representation, convert to int and
            # JPEG-encode the single image left after squeezing the batch dim.
            decoded = self.Gd(tf.concat([shared, exclusive], 3))
            ints = utils.batch_convert2int(decoded)
            return tf.image.encode_jpeg(tf.squeeze(ints, [0]))

        return decode_to_jpeg(rep1_Sy, rep2_Ey), decode_to_jpeg(rep2_Sy, rep1_Ey)
Example #11
0
  def model(self):
    """Build the standard CycleGAN training graph.

    Returns:
        (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x)
    """
    X_reader = Reader(self.X_train_file, name='X',
        image_size=self.image_size, batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y',
        image_size=self.image_size, batch_size=self.batch_size)

    x = X_reader.feed()
    y = Y_reader.feed()

    # Cycle-consistency term shared by both generator losses.
    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

    # X -> Y
    fake_y = self.G(x)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=self.use_lsgan)
    G_loss =  G_gan_loss + cycle_loss
    # NOTE(review): discriminator sees self.fake_y (presumably an image-pool
    # placeholder), not the fresh fake_y above — confirm against the caller.
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan)

    # Y -> X
    fake_x = self.F(y)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan)
    F_loss = F_gan_loss + cycle_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan)

    # summary
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)

    tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
    tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Example #12
0
  def model(self):
      """Build the CycleGAN training graph (comments translated to English).

      Returns:
          (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x)
      """
      X_reader = Reader(self.X_train_file,name='X',image_size=self.image_size,batch_size=self.batch_size)
      Y_reader = Reader(self.Y_train_file, name='Y',
        image_size=self.image_size, batch_size=self.batch_size) # readers for the X and Y tfrecord files

      x = X_reader.feed()
      y = Y_reader.feed() # feed X and Y into x, y

      cycle_loss = self.cycle_consistency_loss(self.G,self.F,x,y) # cycle-consistency loss

      #X->Y
      fake_y = self.G(x) # fake y produced by the generator network
      G_gan_loss = self.generator_loss(self.D_Y,fake_y,use_lsgan = self.use_lsgan) # generator GAN loss; note that G_gan_loss uses fake_y = self.G(x) while D_Y_loss uses self.fake_y — in classic GAN training G and D alternate (D trains once, then G trains once): G_loss is fed the current network's fake_y, while D_Y_loss is fed the fake_y from the previously trained G(?)
      G_loss = G_gan_loss + cycle_loss # total generator loss
      D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan) # forward discriminator loss

      #Y->X
      fake_x = self.F(y) # fake x produced by the reverse generator network
      F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan) # reverse generator GAN loss
      F_loss = F_gan_loss + cycle_loss # total reverse generator loss
      D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan) # reverse discriminator loss

      # summary
      tf.summary.histogram('D_Y/true', self.D_Y(y))
      tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
      tf.summary.histogram('D_X/true', self.D_X(x))
      tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

      tf.summary.scalar('loss/G', G_gan_loss)
      tf.summary.scalar('loss/D_Y', D_Y_loss)
      tf.summary.scalar('loss/F', F_gan_loss)
      tf.summary.scalar('loss/D_X', D_X_loss)
      tf.summary.scalar('loss/cycle', cycle_loss)

      tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
      tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
      tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
      tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

      return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Example #13
0
def check_tf_reader():
    """Sanity-check the tfrecord reader by decoding a batch and saving JPEGs.

    Pulls one training batch from ./TFrecord, converts it back to uint8 and
    writes each sample to ./image/<batch>_<index>_Label_<label>.jpg so the
    pipeline output can be inspected by eye.
    """
    TFrecordPath = './TFrecord'  # tfrecord directory path
    #image_height,image_width,image_mode= utils.get_image_info('./Data/train/airplane/airplane5.png')
    image_width = 128
    image_height = 128
    image_mode = 'L'  # grayscale
    if not os.path.exists('./image'):
        os.makedirs('./image')
    with tf.Graph().as_default():
        reader = tf_datareader('./TFrecord',
                               image_height,
                               image_width,
                               image_mode,
                               batch_size=1,
                               min_queue_examples=1024,
                               num_threads=1024,
                               name='datareader')
        image, label = reader.pipeline_read('train')
        # TensorFlow keeps grayscale images as [h, w, 1] but PIL's fromarray
        # expects [h, w], so reshape grayscale batches first.
        if image_mode == 'L':
            image = utils.batch_gray_reshape(image, image_height, image_width)
        # Float images do not display correctly, so convert to int.
        image = utils.batch_convert2int(image)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                # Pull a single batch and dump each decoded example to disk.
                for batch_no in range(1):
                    if not coord.should_stop():
                        example, labels = sess.run([image, label])
                        print(example[0])
                        for idx in range(1):
                            pil_img = Img.fromarray(example[idx], image_mode)
                            pil_img.save(f'image/{batch_no}_{idx}'
                                         f'_Label_{labels[idx]}.jpg')
            except KeyboardInterrupt:
                print('Interrupted')
                coord.request_stop()
            except tf.errors.OutOfRangeError:
                print('OutOfRangeError')
                coord.request_stop()
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
                coord.join(threads)
Example #14
0
    def sampleY2X(self, input):
        """Decode *input*'s shared representation into the X domain and return
        the result as a JPEG-encoded tensor.

        The exclusive slot is filled with zeros for now instead of sampled
        noise (zeros take the place of the ReLU output it replaces).
        """
        rep_Sy, rep_Ey = self.Fe(input)
        noise = tf.zeros(rep_Ey.shape)

        # Exclusive (noise) part comes first for the F decoder.
        decoded = self.Fd(tf.concat([noise, rep_Sy], 3))

        as_int = utils.batch_convert2int(decoded)
        return tf.image.encode_jpeg(tf.squeeze(as_int, [0]))
Example #15
0
    def sampleX2Y(self, input):
        """Decode *input*'s shared representation into the Y domain and return
        the result as a JPEG-encoded tensor.

        The exclusive slot is filled with zeros for now instead of sampled
        noise.
        """
        rep_Sx, rep_Ex = self.Ge(input)
        noise = tf.zeros(rep_Ex.shape)

        # For the G decoder the shared part comes before the exclusive slot.
        decoded = self.Gd(tf.concat([rep_Sx, noise], 3))

        as_int = utils.batch_convert2int(decoded)
        return tf.image.encode_jpeg(tf.squeeze(as_int, [0]))
Example #16
0
    def model(self):
        """Build the paired shared/exclusive disentanglement training graph.

        Each paired sample is split into domains X and Y and encoded into a
        shared part (rep_S*) and an exclusive part (rep_E*). Cross-domain
        generation replaces the exclusive part with Gaussian noise matched to
        the other domain's exclusive statistics; exclusive-only decoders,
        autoencoding alignment losses and a domain classifier on the shared
        parts complete the objective. All TensorBoard summaries are attached
        here as well.

        Returns:
            (G_loss, D_Y_loss, Dex_Y_loss, F_loss, D_X_loss, Dex_X_loss,
             A_loss, Feat_loss, DC_loss, fake_y, fake_x, fake_ex_y, fake_ex_x)
        """
        XY_reader = ReaderPaired(self.XY_train_file,
                                 name='XY',
                                 image_size=self.image_size,
                                 batch_size=self.batch_size)

        xy = XY_reader.feed()

        # Split returned batch into both domains
        x = xy[0]
        y = xy[1]

        # Generate representation with encoders
        # X -> Y
        rep_Sx, rep_Ex = self.Ge(x)
        # Y -> X
        rep_Sy, rep_Ey = self.Fe(y)

        # Compute stddevs of exclusive parts for noise generation
        mean_X, var_X = tf.nn.moments(rep_Ex, axes=[0, 1, 2])
        mean_Y, var_Y = tf.nn.moments(rep_Ey, axes=[0, 1, 2])

        #### G block, X --> Y
        # Noise stands in for the Y-exclusive part, matching its statistics.
        # (Named noise_y/noise_x instead of reusing one `noise` variable so
        # the histograms below can log the correct draw for each domain.)
        noise_y = tf.random_normal(rep_Ey.shape,
                                   mean=mean_Y,
                                   stddev=tf.sqrt(var_Y))

        # Here, the exclusive bit comes before the shared part
        input_Gd = tf.concat([rep_Sx, noise_y], 3)

        fake_y = self.Gd(input_Gd)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         fake_y,
                                         use_lsgan=self.use_lsgan)

        # Add reconstruction loss on shared features
        repR_Sx, _ = self.Fe(fake_y)
        X_features_loss = tf.reduce_mean(tf.abs(repR_Sx - rep_Sx))

        # Reverse gradient layer as maximing gan loss from exclusive part
        fake_ex_y = self.Gdex(rep_Ex)

        Gdex_loss = self.generator_loss(self.Dex_Y,
                                        fake_ex_y,
                                        use_lsgan=self.use_lsgan)

        G_loss = G_gan_loss + Gdex_loss

        # Discriminators are fed self.fake_* — presumably image-pool
        # placeholders, not the freshly generated tensors above; confirm.
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan)
        Dex_Y_loss = self.discriminator_loss(self.Dex_Y,
                                             y,
                                             self.fake_ex_y,
                                             use_lsgan=self.use_lsgan)

        #### F block, Y-->X
        noise_x = tf.random_normal(rep_Ex.shape,
                                   mean=mean_X,
                                   stddev=tf.sqrt(var_X))

        input_Fd = tf.concat([noise_x, rep_Sy], 3)

        fake_x = self.Fd(input_Fd)
        F_gan_loss = self.generator_loss(self.D_X,
                                         fake_x,
                                         use_lsgan=self.use_lsgan)

        repR_Sy, _ = self.Ge(fake_x)
        Y_features_loss = tf.reduce_mean(tf.abs(repR_Sy - rep_Sy))

        # Reverse gradient layer as maximing gan loss from exclusive part
        fake_ex_x = self.Fdex(rep_Ey)
        Fdex_loss = self.generator_loss(self.Dex_X,
                                        fake_ex_x,
                                        use_lsgan=self.use_lsgan)

        F_loss = F_gan_loss + Fdex_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan)
        Dex_X_loss = self.discriminator_loss(self.Dex_X,
                                             x,
                                             self.fake_ex_x,
                                             use_lsgan=self.use_lsgan)

        # Alignment loss for autoencoders
        alignment_X_loss = tf.reduce_mean(
            tf.abs(self.Fd(tf.concat([rep_Ex, rep_Sx], 3)) - x))
        alignment_Y_loss = tf.reduce_mean(
            tf.abs(self.Gd(tf.concat([rep_Sy, rep_Ey], 3)) - y))

        # Add feature reconstruction loss to alignment as they work on same var set
        A_loss = alignment_X_loss + alignment_Y_loss

        Feat_loss = X_features_loss + Y_features_loss

        # One-hot domain labels ([1,0] = X, [0,1] = Y) for every batch item.
        multiply = tf.constant([self.batch_size])
        dom_labels_x = tf.reshape(tf.tile(tf.constant([1.0, 0.0]), multiply),
                                  [multiply[0], 2])
        dom_labels_y = tf.reshape(tf.tile(tf.constant([0.0, 1.0]), multiply),
                                  [multiply[0], 2])
        dc_loss_x = self.domainClassifier_loss(self.DC, rep_Sx, dom_labels_x)
        dc_loss_y = self.domainClassifier_loss(self.DC, rep_Sy, dom_labels_y)

        DC_pred_X = tf.nn.softmax(self.DC(rep_Sx))
        DC_pred_Y = tf.nn.softmax(self.DC(rep_Sy))
        DC_loss = dc_loss_x + dc_loss_y

        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.Gd(input_Gd)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.Fd(input_Fd)))
        tf.summary.histogram('Dex_Y/true', self.Dex_Y(y))
        tf.summary.histogram('Dex_Y/fake', self.Dex_Y(self.Gdex(rep_Ex)))
        tf.summary.histogram('Dex_X/true', self.Dex_X(x))
        tf.summary.histogram('Dex_X/fake', self.Dex_X(self.Fdex(rep_Ey)))

        tf.summary.histogram('RepX/exc', rep_Ex)
        tf.summary.histogram('RepX/gen', rep_Sx)
        tf.summary.histogram('RepX/noise', noise_x)

        # BUG FIX: the RepY histograms previously logged the X-side tensors
        # (rep_Ex / rep_Sx / the reassigned X noise) a second time.
        tf.summary.histogram('RepY/exc', rep_Ey)
        tf.summary.histogram('RepY/gen', rep_Sy)
        tf.summary.histogram('RepY/noise', noise_y)

        tf.summary.histogram('DC/X/scoreX', DC_pred_X[:, 0])
        tf.summary.histogram('DC/X/scoreY', DC_pred_X[:, 1])
        tf.summary.histogram('DC/Y/scoreX', DC_pred_Y[:, 0])
        tf.summary.histogram('DC/Y/scoreY', DC_pred_Y[:, 1])

        tf.summary.scalar('loss/G_total', G_loss)
        tf.summary.scalar('loss/G_gan', G_gan_loss)
        tf.summary.scalar('loss/Gdex_gan', Gdex_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/Dex_Y', Dex_Y_loss)
        tf.summary.scalar('loss/F_total', F_loss)
        tf.summary.scalar('loss/F_gan', F_gan_loss)
        tf.summary.scalar('loss/Fdex_gan', Fdex_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/Dex_X', Dex_X_loss)
        tf.summary.scalar('loss/alignment_X', alignment_X_loss)
        tf.summary.scalar('loss/alignment_Y', alignment_Y_loss)
        tf.summary.scalar('loss/DC_loss_x', dc_loss_x)
        tf.summary.scalar('loss/DC_loss_y', dc_loss_y)
        tf.summary.scalar('loss/X_features_loss', X_features_loss)
        tf.summary.scalar('loss/Y_features_loss', Y_features_loss)

        tf.summary.image('X/generated',
                         utils.batch_convert2int(self.Gd(input_Gd)))

        # A second, independent noise draw to visualise generator variety.
        noise2 = tf.random_normal(rep_Ey.shape,
                                  mean=mean_Y,
                                  stddev=tf.sqrt(var_Y))

        tf.summary.image(
            'X/generated2',
            utils.batch_convert2int(self.Gd(tf.concat([rep_Sx, noise2], 3))))

        # Swap representations between the first few Y images for inspection.
        ex1 = tf.reshape(
            rep_Ey[0, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])
        s1 = tf.reshape(rep_Sy[0, :],
                        [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])
        ex2 = tf.reshape(
            rep_Ey[1, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])
        s2 = tf.reshape(rep_Sy[1, :],
                        [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])

        ex3 = tf.reshape(
            rep_Ey[2, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])

        tf.summary.image(
            'X/im1bk2',
            utils.batch_convert2int(self.Gd(tf.concat([s1, ex2], 3))))

        tf.summary.image(
            'X/im2bk1',
            utils.batch_convert2int(self.Gd(tf.concat([s2, ex1], 3))))

        tf.summary.image(
            'X/im2bk3',
            utils.batch_convert2int(self.Gd(tf.concat([s2, ex3], 3))))

        tf.summary.image(
            'X/autoencoder_rec',
            utils.batch_convert2int(self.Fd(tf.concat([rep_Ex, rep_Sx], 3))))
        tf.summary.image('X/exclusive_rec',
                         utils.batch_convert2int(self.Gdex(rep_Ex)))

        tf.summary.image('Y/generated',
                         utils.batch_convert2int(self.Fd(input_Fd)))
        tf.summary.image('Y/autoencoder_rec',
                         utils.batch_convert2int(
                             self.Gd(tf.concat([rep_Sy, rep_Ey], 3))),
                         max_outputs=3)
        tf.summary.image('Y/exclusive_rec',
                         utils.batch_convert2int(self.Fdex(rep_Ey)))

        # swap representation, X images
        ex1X = tf.reshape(
            rep_Ex[0, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s1X = tf.reshape(
            rep_Sx[0, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])
        ex2X = tf.reshape(
            rep_Ex[1, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s2X = tf.reshape(
            rep_Sx[1, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])

        tf.summary.image(
            'Y/im1bk2',
            utils.batch_convert2int(self.Fd(tf.concat([ex2X, s1X], 3))))

        tf.summary.image(
            'Y/im2bk1',
            utils.batch_convert2int(self.Fd(tf.concat([ex1X, s2X], 3))))

        tf.summary.image(
            'Y/im2bkg0',
            utils.batch_convert2int(
                self.Fd(tf.concat([tf.zeros(ex1X.shape), s2X], 3))))

        return G_loss, D_Y_loss, Dex_Y_loss, F_loss, D_X_loss, Dex_X_loss, A_loss, Feat_loss, DC_loss, fake_y, fake_x, fake_ex_y, fake_ex_x
Example #17
0
 def sample(self, input):
   """Map *input* through this network and return the output JPEG-encoded."""
   generated = self(input)  # equivalent to self.__call__(input)
   as_int = utils.batch_convert2int(generated)
   # Squeeze away the batch dimension before encoding a single image.
   return tf.image.encode_jpeg(tf.squeeze(as_int, [0]))
Example #18
0
    def model(self):
        """Build the CycleGAN training graph with an added VGG content loss.

        Wires generators G (X->Y) and F (Y->X), cycle-consistency loss, a
        perceptual content loss computed on VGG features, and the usual GAN
        generator/discriminator losses, plus TensorBoard summaries.

        Returns:
            Tuple (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x):
            scalar loss tensors for both generators and discriminators, and
            the generated image batches for each direction.
        """
        vgg_mean_pixel = tf.cast(self.vgg_mean_pixel, tf.float32)

        fake_y = self.G(self.x_image)
        fake_x = self.F(self.y_image)

        # Cycle-consistency loss (both directions).
        cycle_loss = self.cycle_consistency_loss(self.G, self.F, self.x_image,
                                                 self.y_image, fake_x, fake_y)

        # VGG feature extraction; images are mean-subtracted first, as VGG
        # expects.
        # NOTE(review): the builtin `max` is passed as the pooling argument;
        # vgg.net_preloaded presumably expects the string 'max'/'avg' —
        # confirm against vgg.py (comparison against 'avg' would make the
        # builtin fall through to max pooling anyway).
        pre_x_image = tf.subtract(self.x_image, vgg_mean_pixel)
        vgg_x = vgg.net_preloaded(self.vgg_weights, pre_x_image, max)
        # BUGFIX: use the freshly generated `fake_y`/`fake_x` here, not the
        # history-pool placeholders `self.fake_y`/`self.fake_x`, so the
        # content loss actually back-propagates into the generators.
        pre_fake_y_image = tf.subtract(fake_y, vgg_mean_pixel)
        vgg_fake_y = vgg.net_preloaded(self.vgg_weights, pre_fake_y_image, max)

        pre_y_image = tf.subtract(self.y_image, vgg_mean_pixel)
        vgg_y = vgg.net_preloaded(self.vgg_weights, pre_y_image, max)
        pre_fake_x_image = tf.subtract(fake_x, vgg_mean_pixel)
        vgg_fake_x = vgg.net_preloaded(self.vgg_weights, pre_fake_x_image, max)

        # Perceptual content loss on the configured VGG layer(s).
        index = CONTENT_LAYERS
        content_loss = self.content_loss(vgg_x[index], vgg_y[index],
                                         vgg_fake_x[index], vgg_fake_y[index])

        # X -> Y generator loss; the discriminator is trained against
        # `self.fake_y` (placeholder fed from the image pool, per the
        # standard CycleGAN setup).
        G_gan_loss = self.generator_loss(self.D_Y, fake_y, gan=cfg.gan)
        G_loss = G_gan_loss + cycle_loss + content_loss
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           self.y_image,
                                           self.fake_y,
                                           gan=cfg.gan)

        # Y -> X direction, mirrored.
        F_gan_loss = self.generator_loss(self.D_X, fake_x, gan=cfg.gan)
        F_loss = F_gan_loss + cycle_loss + content_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           self.x_image,
                                           self.fake_x,
                                           gan=cfg.gan)

        # Summaries: mean discriminator scores, loss scalars, debug
        # statistics and sample images.
        tf.summary.histogram('D_Y/true',
                             tf.reduce_mean(self.D_Y(self.y_image)))
        tf.summary.histogram('D_Y/fake',
                             tf.reduce_mean(self.D_Y(self.G(self.x_image))))
        tf.summary.histogram('D_X/true',
                             tf.reduce_mean(self.D_X(self.x_image)))
        tf.summary.histogram('D_X/fake',
                             tf.reduce_mean(self.D_X(self.F(self.y_image))))

        tf.summary.scalar('loss/G', G_gan_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F', F_gan_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)
        # BUGFIX: this scalar logs the content loss, so tag it accordingly
        # (it was previously mislabeled 'loss/id').
        tf.summary.scalar('loss/content', content_loss)

        x_generate = fake_y
        x_reconstruct = self.F(fake_y)

        y_generate = fake_x
        y_reconstruct = self.G(fake_x)

        tf.summary.scalar('debug/real_x_mean', tf.reduce_mean(self.x_image))
        tf.summary.scalar('debug/fake_x_mean', tf.reduce_mean(y_generate))
        tf.summary.scalar('debug/real_y_mean', tf.reduce_mean(self.y_image))
        tf.summary.scalar('debug/fake_y_mean', tf.reduce_mean(x_generate))

        # Only the first three channels are visualized for X-domain
        # tensors, which may carry extra channels.
        tf.summary.image('X/input',
                         utils.batch_convert2int(self.x_image[:, :, :, :3]))
        tf.summary.image('X/generated', utils.batch_convert2int(x_generate))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(x_reconstruct[:, :, :, :3]))
        tf.summary.image('Y/input', utils.batch_convert2int(self.y_image))
        tf.summary.image('Y/generated',
                         utils.batch_convert2int(y_generate[:, :, :, :3]))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(y_reconstruct))

        return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Пример #19
0
 def sample(self, input):
     """Translate `input` and return the result as JPEG-encoded bytes."""
     converted = utils.batch_convert2int(self.__call__(input))
     single = tf.squeeze(converted, [0])  # drop the batch dimension
     return tf.image.encode_jpeg(single)
Пример #20
0
    def model(self):
        """Assemble the fuzzy-clustering CycleGAN training graph.

        Builds both translation directions with label-conditioned GAN
        losses, adds fuzzy-clustering and dispersion terms on the
        classifier features, and attaches TensorBoard summaries.

        Returns:
            (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x,
             Disperse_loss, Fuzzy_loss, feature_x, feature_y,
             F(fake_y), G(fake_x))
        """
        real_x = self.x
        real_y = self.y

        cycle_loss = self.cycle_consistency_loss(self.G, self.F, real_x,
                                                 real_y)

        # X -> Y direction.
        gen_y = self.G(real_x)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         gen_y,
                                         self.y_label,
                                         use_lsgan=self.use_lsgan)
        G_loss = G_gan_loss + cycle_loss
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           real_y,
                                           gen_y,
                                           self.y_label,
                                           use_lsgan=self.use_lsgan)

        # Y -> X direction.
        gen_x = self.F(real_y)
        F_gan_loss = self.generator_loss(self.D_X,
                                         gen_x,
                                         self.x_label,
                                         use_lsgan=self.use_lsgan)
        F_loss = F_gan_loss + cycle_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           real_x,
                                           gen_x,
                                           self.x_label,
                                           use_lsgan=self.use_lsgan)

        # Fuzzy-clustering losses on real X and on the Y->X translations.
        Fuzzy_x_loss, feature_x = self.fuzzy_loss(self.C, real_x, self.Ux2y,
                                                  self.ClusterX)
        Fuzzy_y_loss, feature_y = self.fuzzy_loss(self.C, gen_x, self.Uy2x,
                                                  self.ClusterY)
        # Negated so that larger dispersion lowers the loss.
        Disperse_loss = -self.disperse_loss(feature_y, self.Uy2x)
        Fuzzy_loss = Fuzzy_x_loss + Fuzzy_y_loss  #+Disperse_loss

        # Discriminator score histograms.
        tf.summary.histogram('D_Y/true', self.D_Y(real_y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(real_x)))
        tf.summary.histogram('D_X/true', self.D_X(real_x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(real_y)))

        # Scalar loss summaries.
        for tag, value in (('loss/G', G_gan_loss), ('loss/D_Y', D_Y_loss),
                           ('loss/F', F_gan_loss), ('loss/D_X', D_X_loss),
                           ('loss/cycle', cycle_loss),
                           ('loss/Disperse', Disperse_loss),
                           ('loss/Fuzzy', Fuzzy_loss)):
            tf.summary.scalar(tag, value)

        # Sample translations and cycle reconstructions.
        tf.summary.image('X/generated', utils.batch_convert2int(self.G(real_x)))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(self.F(self.G(real_x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(real_y)))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(self.G(self.F(real_y))))

        return (G_loss, D_Y_loss, F_loss, D_X_loss, gen_y, gen_x,
                Disperse_loss, Fuzzy_loss, feature_x, feature_y,
                self.F(gen_y), self.G(gen_x))
Пример #21
0
    def model(self):
        """Build the paired shared/exclusive representation-disentangling GAN.

        Encodes both domains into shared (rep_S*) and exclusive (rep_E*)
        features, generates cross-domain images from the shared features
        plus exclusive codes sampled from noise, and assembles GAN,
        autoencoder-alignment, feature-reconstruction and domain-classifier
        losses, together with TensorBoard summaries and two evaluation
        scores (background/foreground swap consistency).

        Returns:
            dict mapping names ('G_loss', 'D_Y_loss', ..., 'fake_y',
            'swapScoreFG', 'swapScoreBKG') to the corresponding tensors.
        """
        XY_reader = ReaderPaired(self.XY_train_file,
                                 name='XY',
                                 image_size=self.image_size,
                                 batch_size=self.batch_size)

        xy = XY_reader.feed()

        # Split the returned paired batch into both domains.
        x = xy[0]
        y = xy[1]

        # Encode each domain into (shared, exclusive) representations.
        rep_Sx, rep_Ex = self.Ge(x)
        rep_Sy, rep_Ey = self.Fe(y)

        #### G block, X --> Y

        # Sample an exclusive-Y code from noise; train it adversarially so
        # it matches the distribution of real exclusive-Y representations.
        noise_Y = tf.random_normal([self.batch_size, 1, 1, 10])
        fake_Zex_y = self.ZEx_Y(noise_Y)
        ZEx_Y_loss = self.generator_loss_1input(self.DZEx_Y,
                                                fake_Zex_y,
                                                use_lsgan=self.use_lsgan)

        DZEx_Y_loss = self.discriminator_loss_1input(self.DZEx_Y,
                                                     rep_Ey,
                                                     self.fake_Zex_y,
                                                     use_lsgan=self.use_lsgan)

        # For Gd the shared part comes BEFORE the exclusive part (matches
        # the alignment decoding below).
        input_Gd = tf.concat([rep_Sx, fake_Zex_y], 3)

        fake_y = self.Gd(input_Gd)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         x,
                                         fake_y,
                                         use_lsgan=self.use_lsgan)

        # Image generated from the exclusive part only; trained through a
        # reverse-gradient path so the exclusive rep cannot carry enough
        # information to fool the discriminator on its own.
        fake_ex_y = self.Gdex(rep_Ex)
        Gdex_loss = self.generator_loss(self.Dex_Y,
                                        x,
                                        fake_ex_y,
                                        use_lsgan=self.use_lsgan)

        G_loss = G_gan_loss + Gdex_loss
        G_loss = self.weightGAN * G_loss

        # Discriminators score against the pooled fakes held in
        # `self.fake_y` / `self.fake_ex_y`.
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           x,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan)
        Dex_Y_loss = self.discriminator_loss(self.Dex_Y,
                                             x,
                                             y,
                                             self.fake_ex_y,
                                             use_lsgan=self.use_lsgan)

        #### F block, Y --> X

        # Sample an exclusive-X code from noise, mirroring the G block.
        noise_X = tf.random_normal([self.batch_size, 1, 1, 10])
        fake_Zex_x = self.ZEx_X(noise_X)
        ZEx_X_loss = self.generator_loss_1input(self.DZEx_X,
                                                fake_Zex_x,
                                                use_lsgan=self.use_lsgan)

        # BUGFIX: the X-side code discriminator must see real exclusive-X
        # representations (rep_Ex) as its "true" samples; it previously
        # received rep_Ey (the Y-domain codes), training DZEx_X against the
        # wrong distribution.
        DZEx_X_loss = self.discriminator_loss_1input(self.DZEx_X,
                                                     rep_Ex,
                                                     self.fake_Zex_x,
                                                     use_lsgan=self.use_lsgan)

        ZEx_loss = ZEx_X_loss + ZEx_Y_loss

        # For Fd the exclusive part comes BEFORE the shared part.
        input_Fd = tf.concat([fake_Zex_x, rep_Sy], 3)

        fake_x = self.Fd(input_Fd)
        F_gan_loss = self.generator_loss(self.D_X,
                                         y,
                                         fake_x,
                                         use_lsgan=self.use_lsgan)

        # Reverse-gradient image from the exclusive-Y representation.
        fake_ex_x = self.Fdex(rep_Ey)
        Fdex_loss = self.generator_loss(self.Dex_X,
                                        y,
                                        fake_ex_x,
                                        use_lsgan=self.use_lsgan)

        F_loss = F_gan_loss + Fdex_loss
        F_loss = self.weightGAN * F_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           y,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan)
        Dex_X_loss = self.discriminator_loss(self.Dex_X,
                                             y,
                                             x,
                                             self.fake_ex_x,
                                             use_lsgan=self.use_lsgan)

        # Autoencoder alignment losses; note the decoder input orderings
        # match the conventions above (Fd: exclusive first, Gd: shared
        # first).
        alignment_X_loss = tf.reduce_mean(
            tf.abs(self.Fd(tf.concat([rep_Ex, rep_Sx], 3)) - x))
        alignment_Y_loss = tf.reduce_mean(
            tf.abs(self.Gd(tf.concat([rep_Sy, rep_Ey], 3)) - y))

        # Y reconstruction is weighted 10x heavier than X.
        A_loss = alignment_X_loss + 10 * alignment_Y_loss

        # Feature reconstruction for the paired case: shared reps of
        # paired images should agree.
        Feat_loss = tf.reduce_mean(tf.abs(rep_Sx - rep_Sy))

        # Domain classifier on the shared representations; one-hot labels
        # [1,0] for X and [0,1] for Y, tiled over the batch.
        multiply = tf.constant([self.batch_size])
        dom_labels_x = tf.reshape(tf.tile(tf.constant([1.0, 0.0]), multiply),
                                  [multiply[0], 2])
        dom_labels_y = tf.reshape(tf.tile(tf.constant([0.0, 1.0]), multiply),
                                  [multiply[0], 2])
        dc_loss_x = self.domainClassifier_loss(self.DC, rep_Sx, dom_labels_x)
        dc_loss_y = self.domainClassifier_loss(self.DC, rep_Sy, dom_labels_y)

        DC_pred_X = tf.nn.softmax(self.DC(rep_Sx))
        DC_pred_Y = tf.nn.softmax(self.DC(rep_Sy))
        DC_loss = dc_loss_x + dc_loss_y

        # --- Summaries ---
        tf.summary.histogram('D_Y/true', self.D_Y(x, y))
        tf.summary.histogram('D_Y/fake', self.D_Y(x, self.Gd(input_Gd)))
        tf.summary.histogram('D_X/true', self.D_X(y, x))
        tf.summary.histogram('D_X/fake', self.D_X(y, self.Fd(input_Fd)))
        tf.summary.histogram('Dex_Y/true', self.Dex_Y(x, y))
        tf.summary.histogram('Dex_Y/fake', self.Dex_Y(x, self.Gdex(rep_Ex)))
        tf.summary.histogram('Dex_X/true', self.Dex_X(y, x))
        tf.summary.histogram('Dex_X/fake', self.Dex_X(y, self.Fdex(rep_Ey)))

        tf.summary.histogram('RepX/exc', rep_Ex)
        tf.summary.histogram('RepX/gen', rep_Sx)
        tf.summary.histogram('RepX/noise', noise_X)

        tf.summary.histogram('RepY/exc', rep_Ey)
        tf.summary.histogram('RepY/gen', rep_Sy)
        tf.summary.histogram('RepY/noise', noise_Y)

        tf.summary.histogram('DC/X/scoreX', DC_pred_X[:, 0])
        tf.summary.histogram('DC/X/scoreY', DC_pred_X[:, 1])
        tf.summary.histogram('DC/Y/scoreX', DC_pred_Y[:, 0])
        tf.summary.histogram('DC/Y/scoreY', DC_pred_Y[:, 1])

        tf.summary.scalar('loss/G_total', G_loss)
        tf.summary.scalar('loss/G_gan', G_gan_loss)
        tf.summary.scalar('loss/Gdex_gan', Gdex_loss)
        tf.summary.scalar('loss/ZEx_loss', ZEx_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/Dex_Y', Dex_Y_loss)
        tf.summary.scalar('loss/DZEx_Y', DZEx_Y_loss)
        tf.summary.scalar('loss/F_total', F_loss)
        tf.summary.scalar('loss/F_gan', F_gan_loss)
        tf.summary.scalar('loss/Fdex_gan', Fdex_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/Dex_X', Dex_X_loss)
        tf.summary.scalar('loss/DZEx_X', DZEx_X_loss)
        tf.summary.scalar('loss/alignment_X', alignment_X_loss)
        tf.summary.scalar('loss/alignment_Y', alignment_Y_loss)
        tf.summary.scalar('loss/DC_loss_x', dc_loss_x)
        tf.summary.scalar('loss/DC_loss_y', dc_loss_y)
        tf.summary.scalar('loss/Feat_loss', Feat_loss)

        generatedX1 = self.Gd(input_Gd)
        tf.summary.image('X/generated', utils.batch_convert2int(generatedX1))

        # Generate a second sample with fresh noise to visualize how much
        # the exclusive code affects the output.
        noise2 = tf.random_normal([self.batch_size, 1, 1, 10])
        fake_Zex_y2 = self.ZEx_Y(noise2)

        generatedX2 = self.Gd(tf.concat([rep_Sx, fake_Zex_y2], 3))
        tf.summary.image('X/generated2', utils.batch_convert2int(generatedX2))

        # Variation induced by the noise, measured on a top-left patch.
        noiseXVar = tf.reduce_mean(
            tf.abs(generatedX1[:, :4, :4, :] - generatedX2[:, :4, :4, :]))
        tf.summary.scalar('Eval/XnoiseVar', noiseXVar)

        # Autoencoder reconstructions and exclusive-only reconstructions.
        autoX = self.Fd(tf.concat([rep_Ex, rep_Sx], 3))
        tf.summary.image('X/autoencoder_rec', utils.batch_convert2int(autoX))
        tf.summary.image('X/exclusive_rec',
                         utils.batch_convert2int(self.Gdex(rep_Ex)))

        autoY = self.Gd(tf.concat([rep_Sy, rep_Ey], 3))
        tf.summary.image('Y/autoencoder_rec',
                         utils.batch_convert2int(autoY),
                         max_outputs=3)
        tf.summary.image('Y/exclusive_rec',
                         utils.batch_convert2int(self.Fdex(rep_Ey)))

        # Evaluation score on swapped backgrounds (Y side).
        swapScoreBKG = self.computeSwapScoreBKG(rep_Sy, rep_Ey, autoY)
        tf.summary.scalar('Eval/swapScoreBKG', swapScoreBKG)

        tf.summary.image('Y/generated',
                         utils.batch_convert2int(self.Fd(input_Fd)))

        # Swap representations between the first two X images: keep one
        # image's shared part while taking the other's exclusive part.
        ex1X = tf.reshape(
            rep_Ex[0, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s1X = tf.reshape(
            rep_Sx[0, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])
        ex2X = tf.reshape(
            rep_Ex[1, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s2X = tf.reshape(
            rep_Sx[1, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])

        im1bk2 = self.Fd(tf.concat([ex2X, s1X], 3))
        tf.summary.image('Y/im1bk2', utils.batch_convert2int(im1bk2))

        im2bk1 = self.Fd(tf.concat([ex1X, s2X], 3))
        tf.summary.image('Y/im2bk1', utils.batch_convert2int(im2bk1))

        # Decode image 2's shared part with a zeroed exclusive code.
        im2bk0 = self.Fd(tf.concat([tf.zeros(ex1X.shape), s2X], 3))
        tf.summary.image('Y/im2bkg0', utils.batch_convert2int(im2bk0))

        # Evaluation score: swapped-exclusive decodes should still match
        # the corresponding autoencoder reconstructions.
        swapScoreFG = tf.reduce_mean(
            tf.abs(im1bk2[0, :, :, :] - autoX[0, :, :, :])) + tf.reduce_mean(
                tf.abs(im2bk1[0, :, :, :] - autoX[1, :, :, :]))

        tf.summary.scalar('Eval/swapScoreFG', swapScoreFG)

        # Visualize the representations themselves as feature-map grids.
        tf.summary.image('ZZExclRep/Xgenerated',
                         utils.batch_convert2fmint(rep_Ex, self.nfe),
                         max_outputs=16)
        tf.summary.image('ZZExclRep/Xnoise',
                         utils.batch_convert2fmint(fake_Zex_y, self.nfe),
                         max_outputs=16)
        tf.summary.image('ZZExclRep/Ygenerated',
                         utils.batch_convert2fmint(rep_Ey, self.nfe),
                         max_outputs=16)
        tf.summary.image('ZZExclRep/Ynoise',
                         utils.batch_convert2fmint(fake_Zex_x, self.nfe),
                         max_outputs=16)
        tf.summary.image('ZZSharedRep/X',
                         utils.batch_convert2fmint(rep_Sx, self.nfs),
                         max_outputs=4)
        tf.summary.image('ZZSharedRep/Y',
                         utils.batch_convert2fmint(rep_Sy, self.nfs),
                         max_outputs=4)

        # Build the dictionary of losses and tensors to return.
        loss_dict = {
            'G_loss': G_loss,
            'D_Y_loss': D_Y_loss,
            'Dex_Y_loss': Dex_Y_loss,
            'DZEx_Y_loss': DZEx_Y_loss,
            'F_loss': F_loss,
            'D_X_loss': D_X_loss,
            'Dex_X_loss': Dex_X_loss,
            'DZEx_X_loss': DZEx_X_loss,
            'A_loss': A_loss,
            'Feat_loss': Feat_loss,
            'DC_loss': DC_loss,
            'ZEx_loss': ZEx_loss,
            'fake_y': fake_y,
            'fake_x': fake_x,
            'fake_ex_y': fake_ex_y,
            'fake_ex_x': fake_ex_x,
            'fake_Zex_x': fake_Zex_x,
            'fake_Zex_y': fake_Zex_y,
            'swapScoreFG': swapScoreFG,
            'swapScoreBKG': swapScoreBKG
        }
        return loss_dict
Пример #22
0
    def model(self):
        """Construct the encoder/decoder CycleGAN training graph.

        Two encoder/decoder pairs (Ge/Gd for X->Y, Fe/Fd for Y->X) are
        trained with GAN, cycle-consistency and alignment losses; loss and
        image summaries are attached for TensorBoard.

        Returns:
            (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x)
        """
        X_reader = Reader(self.X_train_file,
                          name='X',
                          image_size=self.image_size,
                          batch_size=self.batch_size)
        Y_reader = Reader(self.Y_train_file,
                          name='Y',
                          image_size=self.image_size,
                          batch_size=self.batch_size)

        x = X_reader.feed()
        y = Y_reader.feed()

        cycle_loss = self.cycle_consistency_loss(self.Ge, self.Gd, self.Fe,
                                                 self.Fd, x, y)
        alignment_loss = self.alignment_loss(self.Ge, self.Gd, self.Fe,
                                             self.Fd, x, y)

        # X -> Y: encode with Ge, decode with Gd. The discriminator scores
        # `self.fake_y` (presumably the image-pool placeholder — confirm).
        x_to_y = self.Gd(self.Ge(x))
        G_gan_loss = self.generator_loss(self.D_Y,
                                         x_to_y,
                                         use_lsgan=self.use_lsgan)
        G_loss = G_gan_loss + cycle_loss + alignment_loss
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan)

        # Y -> X: encode with Fe, decode with Fd.
        y_to_x = self.Fd(self.Fe(y))
        F_gan_loss = self.generator_loss(self.D_X,
                                         y_to_x,
                                         use_lsgan=self.use_lsgan)
        F_loss = F_gan_loss + cycle_loss + alignment_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan)

        # Discriminator score histograms.
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.Gd(self.Ge(x))))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.Fd(self.Fe(y))))

        # Scalar loss summaries.
        for tag, loss in (('loss/G', G_gan_loss), ('loss/D_Y', D_Y_loss),
                          ('loss/F', F_gan_loss), ('loss/D_X', D_X_loss),
                          ('loss/cycle', cycle_loss),
                          ('loss/alignment', alignment_loss)):
            tf.summary.scalar(tag, loss)

        # Generated, cycle-reconstructed and cross-decoded sample images.
        tf.summary.image('X/generated',
                         utils.batch_convert2int(self.Gd(self.Ge(x))))
        tf.summary.image(
            'X/reconstruction',
            utils.batch_convert2int(self.Fd(self.Fe(self.Gd(self.Ge(x))))))
        tf.summary.image('X/self_reconstruction',
                         utils.batch_convert2int(self.Fd(self.Ge(x))))
        tf.summary.image('Y/generated',
                         utils.batch_convert2int(self.Fd(self.Fe(y))))
        tf.summary.image(
            'Y/reconstruction',
            utils.batch_convert2int(self.Gd(self.Ge(self.Fd(self.Fe(y))))))
        tf.summary.image('Y/self_reconstruction',
                         utils.batch_convert2int(self.Gd(self.Fe(y))),
                         max_outputs=3)

        return G_loss, D_Y_loss, F_loss, D_X_loss, x_to_y, y_to_x
    def model(self):
        """Build the railway-constrained CycleGAN training graph.

        Standard CycleGAN losses plus a railway-region loss on both
        translation directions; `dy` selects direction-specific handling
        inside the discriminator loss (see discriminator_loss).

        Returns:
            (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x, y, x)
        """
        X_reader = Reader(self.X_train_file,
                          name='X',
                          image_size=self.image_size,
                          batch_size=self.batch_size)
        Y_reader = Reader(self.Y_train_file,
                          name='Y',
                          image_size=self.image_size,
                          batch_size=self.batch_size)

        x = X_reader.feed()
        y = Y_reader.feed()

        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

        # X -> Y direction: GAN loss plus the railway constraint.
        x_to_y = self.G(x)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         x_to_y,
                                         use_lsgan=self.use_lsgan)
        G_railway_loss = self.railway_loss(x, x_to_y, True)
        G_loss = G_gan_loss + cycle_loss + G_railway_loss
        # Discriminator is trained against `self.fake_y` (presumably the
        # image-pool placeholder — confirm against the training loop).
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan,
                                           dy=True)

        # Y -> X direction, mirrored.
        y_to_x = self.F(y)
        F_gan_loss = self.generator_loss(self.D_X,
                                         y_to_x,
                                         use_lsgan=self.use_lsgan)
        F_railway_loss = self.railway_loss(y, y_to_x, False)
        F_loss = F_gan_loss + cycle_loss + F_railway_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan,
                                           dy=False)

        # Discriminator score histograms.
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

        # Scalar loss summaries.
        for tag, loss in (('loss/G', G_gan_loss), ('loss/D_Y', D_Y_loss),
                          ('loss/F', F_gan_loss), ('loss/D_X', D_X_loss),
                          ('loss/cycle', cycle_loss)):
            tf.summary.scalar(tag, loss)

        # Sample translations and cycle reconstructions.
        tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(self.F(self.G(x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(self.G(self.F(y))))

        return G_loss, D_Y_loss, F_loss, D_X_loss, x_to_y, y_to_x, y, x
Пример #24
0
    def model(self):
        """Build the keypoint-subnet training graph and its summaries.

        Taps intermediate ResNet-v2-50 feature maps, runs the keypoint
        subnet and detection feature net, computes the subnet losses, and
        logs ground-truth vs predicted heatmaps for a few joints.

        Returns:
            (intermediate_loss, keypoint_subnet_loss) scalar loss tensors.
        """
        # Feature maps tapped from the preloaded ResNet-v2-50 end points.
        res_block_c2, res_block_c3, res_block_c4, res_block_c5 = \
            self.end_points['resnet_v2_50/block1/unit_2/bottleneck_v2'], \
            self.end_points['resnet_v2_50/block2/unit_3/bottleneck_v2'], \
            self.end_points['resnet_v2_50/block3/unit_4/bottleneck_v2'], \
            self.end_points['resnet_v2_50/block4']
        keypoint2, keypoint3, keypoint4, keypoint5, intermediate_output = self.keypoint(
            res_block_c2, res_block_c3, res_block_c4, res_block_c5)
        output = self.d_featurenet(keypoint2, keypoint3, keypoint4, keypoint5)
        keypoint_subnet_loss, intermediate_loss = self.keypoint_subnet_loss(
            output, intermediate_output)

        tf.summary.scalar('keypoint_subnet_loss', keypoint_subnet_loss)

        tf.summary.image('origin_image', utils.batch_convert2int(self.X))

        def _heatmap_summary(tag, heatmaps, channel):
            """Log one heatmap channel (quarter-resolution) as an image."""
            side = self.image_size // 4
            tf.summary.image(
                tag,
                utils.batch_convert2int(
                    tf.reshape(
                        tf.transpose(heatmaps, [3, 0, 1, 2])[channel],
                        shape=[-1, side, side, 1])))

        # Ground-truth heatmaps for a few representative joints.
        # NOTE(review): channel->joint mapping (0=right ankle, 12=head,
        # 13=neck) is taken from the original tag names — confirm against
        # the dataset's joint ordering.
        _heatmap_summary('right_ankle_ground_truth', self.Y, 0)
        _heatmap_summary('head_ground_truth', self.Y, 12)
        _heatmap_summary('neck_ground_truth', self.Y, 13)

        # Predicted heatmaps for the same joints.
        _heatmap_summary('right_ankle_predict', output, 0)
        _heatmap_summary('head_predict', output, 12)
        _heatmap_summary('neck_predict', output, 13)

        return intermediate_loss, keypoint_subnet_loss
Пример #25
0
    def build(self):
        """Build the two-GPU CycleGAN training graph with WGAN-style gradient penalty.

        Feeds one batch from each domain, runs both generator/discriminator
        pairs (A->B on GPU 0, B->A on GPU 1), forms cycle-consistency,
        adversarial and gradient-penalty losses, and attaches TensorBoard
        summaries.

        Returns:
            Tuple (Ga2b_loss, Da2b_loss, Gb2a_loss, Db2a_loss,
            fake_a, fake_b, real_a, real_b).
        """

        # Input pipelines for the two training domains.
        A_reader = Reader(self.X_train_file,
                          name='RA',
                          image_size=self.image_size,
                          batch_size=self.batch_size)
        B_reader = Reader(self.Y_train_file,
                          name='RB',
                          image_size=self.image_size,
                          batch_size=self.batch_size)

        real_a = A_reader.feed()
        real_b = B_reader.feed()

        # x = tf.placeholder(dtype = tf.float32,shape=(5,270,480,3))
        # y = tf.placeholder(dtype = tf.float32,shape=(5,270,480,3))

        # A -> B half of the model is placed on GPU 0.
        with tf.device("/gpu:0"):
            fake_b = self.Ga2b(real_a)

            label_b = self.Da2b(real_b)      # discriminator score on real B
            logits_a2b = self.Da2b(fake_b)   # discriminator score on generated B
        # B -> A half is placed on GPU 1.
        with tf.device("/gpu:1"):
            fake_a = self.Gb2a(real_b)

            label_a = self.Db2a(real_a)
            logits_b2a = self.Db2a(fake_a)

        cycle_loss = self.cycle_consistency_loss(real_a, real_b, fake_a,
                                                 fake_b)

        with tf.device("/gpu:0"):
            # a -> b
            a2b_gen_loss = self.generator_loss(logits_a2b)
            Ga2b_loss = a2b_gen_loss + cycle_loss

            # Gradient penalty: penalize the discriminator's gradient norm
            # at random interpolates between real and fake samples.
            alpha = tf.random_uniform(shape=[self.batch_size, 1],
                                      minval=0.,
                                      maxval=1.)

            # NOTE(review): self.fake_b (an attribute, presumably an
            # image-pool/history placeholder) is used here instead of the
            # locally generated fake_b above -- confirm this is intentional.
            differences = self.fake_b - real_b
            interpolates = real_b + (alpha * differences)
            gradients = tf.gradients(self.Da2b(interpolates),
                                     [interpolates])[0]
            # NOTE(review): reduction_indices=[1] reduces over a single axis;
            # WGAN-GP conventionally reduces over all non-batch axes of an
            # image-shaped gradient -- verify the expected gradient rank.
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
            gradient_penalty = tf.reduce_mean((slopes - 1.)**2)

            Da2b_loss = self.discriminator_loss(self.Da2b(self.fake_b),
                                                label_b, gradient_penalty)

        with tf.device("/gpu:1"):
            # b -> a (mirror of the a -> b branch above)
            b2a_gen_loss = self.generator_loss(logits_b2a)
            Gb2a_loss = b2a_gen_loss + cycle_loss

            # Gradient penalty
            alpha = tf.random_uniform(shape=[self.batch_size, 1],
                                      minval=0.,
                                      maxval=1.)

            differences = self.fake_a - real_a
            interpolates = real_a + (alpha * differences)
            gradients = tf.gradients(self.Db2a(interpolates),
                                     [interpolates])[0]
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
            gradient_penalty = tf.reduce_mean((slopes - 1.)**2)

            Db2a_loss = self.discriminator_loss(self.Db2a(self.fake_a),
                                                label_a, gradient_penalty)

        # summary
        tf.summary.histogram('Da2b/true', label_b)
        tf.summary.histogram('Da2b/fake', self.Da2b(self.fake_b))
        tf.summary.histogram('Db2a/true', label_a)
        tf.summary.histogram('Db2a/fake', self.Db2a(self.fake_a))

        tf.summary.scalar('loss/a2b_gen_loss', a2b_gen_loss)
        tf.summary.scalar('loss/Da2b_loss', Da2b_loss)
        tf.summary.scalar('loss/b2a_gen_loss', b2a_gen_loss)
        tf.summary.scalar('loss/Db2a_loss', Db2a_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)
        tf.summary.scalar('loss/Ga2b_loss', Ga2b_loss)
        tf.summary.scalar('loss/Gb2a_loss', Gb2a_loss)

        # Image summaries: real, translated, and cycle-reconstructed images.
        tf.summary.image('a2b/real', utils.batch_convert2int(real_a))
        tf.summary.image('a2b/generated', utils.batch_convert2int(fake_b))
        tf.summary.image('a2b/reconstruction',
                         utils.batch_convert2int(self.Gb2a(fake_b)))

        tf.summary.image('b2a/real', utils.batch_convert2int(real_b))
        tf.summary.image('b2a/generated', utils.batch_convert2int(fake_a))
        tf.summary.image('b2a/reconstruction',
                         utils.batch_convert2int(self.Ga2b(fake_a)))

        return Ga2b_loss, Da2b_loss, Gb2a_loss, Db2a_loss, fake_a, fake_b, real_a, real_b
Пример #26
0
    def model(self):
        """Build a CycleGAN graph with extra "ink" and identity losses.

        Operates on self.x_image / self.y_image. The generator G maps
        X -> Y and F maps Y -> X; losses combine the usual GAN and cycle
        terms with an identity loss and an ink loss applied to the Y side.

        Returns:
            Tuple (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x).
        """
        fake_y = self.G(self.x_image)
        fake_x = self.F(self.y_image)
        # cycle loss
        cycle_loss = self.cycle_consistency_loss(self.G, self.F, self.x_image,
                                                 self.y_image, fake_x, fake_y)
        # ink_loss
        ink_loss_D = self.discriminator_loss(self.D_Y,
                                             self.y_image,
                                             fake_y,
                                             gan="ink_loss")
        ink_loss_G = self.generator_loss(self.D_Y,
                                         self.y_image,
                                         gan="ink_loss")
        # identity loss
        # Computed by feeding the real images as the "fakes" into the
        # cycle-consistency helper, i.e. it compares each image against
        # its own round trip.
        id_loss = self.cycle_consistency_loss(self.G, self.F, self.x_image,
                                              self.y_image, self.y_image,
                                              self.x_image)

        # X -> Y
        G_gan_loss = self.generator_loss(self.D_Y, fake_y, gan=cfg.gan)
        G_loss = G_gan_loss + cycle_loss + id_loss + ink_loss_G

        # NOTE(review): self.fake_y / self.fake_x (attributes, presumably
        # image-pool placeholders) are used here instead of the local
        # fake_y / fake_x -- confirm this is the intended pool pattern.
        D_Y_loss = self.discriminator_loss(
            self.D_Y, self.y_image, self.fake_y, gan=cfg.gan) + ink_loss_D

        # Y -> X
        F_gan_loss = self.generator_loss(self.D_X, fake_x, gan=cfg.gan)
        F_loss = F_gan_loss + cycle_loss + id_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           self.x_image,
                                           self.fake_x,
                                           gan=cfg.gan)

        # summary
        tf.summary.histogram('D_Y/true',
                             tf.reduce_mean(self.D_Y(self.y_image)))
        tf.summary.histogram('D_Y/fake',
                             tf.reduce_mean(self.D_Y(self.G(self.x_image))))
        tf.summary.histogram('D_X/true',
                             tf.reduce_mean(self.D_X(self.x_image)))
        tf.summary.histogram('D_X/fake',
                             tf.reduce_mean(self.D_X(self.F(self.y_image))))

        tf.summary.scalar('loss/G', G_gan_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F', F_gan_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)
        tf.summary.scalar('loss/ink_D', ink_loss_D)
        tf.summary.scalar('loss/ink_G', ink_loss_G)
        tf.summary.scalar('loss/id', id_loss)

        x_generate = fake_y
        x_reconstruct = self.F(fake_y)

        y_generate = fake_x
        y_reconstruct = self.G(fake_x)

        # Mean-value debug scalars for spotting mode collapse / saturation.
        tf.summary.scalar('debug/real_x_mean', tf.reduce_mean(self.x_image))
        tf.summary.scalar('debug/fake_x_mean', tf.reduce_mean(y_generate))
        tf.summary.scalar('debug/real_y_mean', tf.reduce_mean(self.y_image))
        tf.summary.scalar('debug/fake_y_mean', tf.reduce_mean(x_generate))

        # NOTE(review): x_image and the reconstructions are sliced to the
        # first 3 channels for display, which suggests the X domain carries
        # extra channels -- confirm against the data pipeline.
        tf.summary.image('X/input',
                         utils.batch_convert2int(self.x_image[:, :, :, :3]))
        tf.summary.image('X/generated', utils.batch_convert2int(x_generate))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(x_reconstruct[:, :, :, :3]))
        tf.summary.image('Y/input', utils.batch_convert2int(self.y_image))
        tf.summary.image('Y/generated',
                         utils.batch_convert2int(y_generate[:, :, :, :3]))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(y_reconstruct))

        return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Пример #27
0
    def model(self):
        """Build a shared/exclusive-representation translation graph.

        Reads paired X/Y batches, encodes each image into a shared
        representation (rep_S*) and an exclusive representation (rep_E*),
        decodes cross-domain translations, trains a noise generator that
        imitates exclusive features, a domain classifier on the shared
        features, and adds autoencoding/alignment and feature
        reconstruction losses. Also registers extensive summaries,
        including exclusive-representation swaps between images.

        Returns:
            Tuple (G_loss, D_Y_loss, F_loss, D_X_loss, A_loss, Feat_loss,
            DC_loss, Gnoise_loss, D_Gnoise_loss, fake_y, fake_x).
        """
        XY_reader = ReaderPaired(self.XY_train_file,
                                 name='XY',
                                 image_size=self.image_size,
                                 batch_size=self.batch_size)

        xy = XY_reader.feed()

        # Split returned batch into both domains
        x = xy[0]
        y = xy[1]

        # Generate representation with encoders
        # X -> Y
        rep_Sx, rep_Ex = self.Ge(x)
        # Y -> X
        rep_Sy, rep_Ey = self.Fe(y)

        # For now, we still need this
        # Per-channel statistics of the exclusive features; mean_X/var_X
        # parameterize the noise sampled for the Y -> X direction below.
        # NOTE(review): mean_Y / var_Y are computed but never used here.
        mean_X, var_X = tf.nn.moments(rep_Ex, axes=[0, 1, 2])
        mean_Y, var_Y = tf.nn.moments(rep_Ey, axes=[0, 1, 2])

        # make this automatic
        # NOTE(review): the [32, 2, 2, 100] noise shape is hard-coded and
        # must match batch size and rep_Ex's spatial/channel dims -- confirm.
        noiseZ = tf.random_normal([32, 2, 2, 100], mean=0, stddev=1)

        # Generate exclusive from noise
        fake_rep_Ex = self.Gnoise(noiseZ)

        # GAN in the exclusive-feature space: Gnoise tries to fool
        # D_Gnoise into accepting fake exclusive features as real rep_Ex.
        Gnoise_loss = self.generator_loss(self.D_Gnoise,
                                          fake_rep_Ex,
                                          use_lsgan=self.use_lsgan)
        D_Gnoise_loss = self.discriminator_loss(self.D_Gnoise,
                                                rep_Ex,
                                                fake_rep_Ex,
                                                use_lsgan=self.use_lsgan)

        # Here, the exlusive bit comes before the shared part
        input_Gd = tf.concat([rep_Sx, fake_rep_Ex], 3)

        fake_y = self.Gd(input_Gd)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         fake_y,
                                         use_lsgan=self.use_lsgan)

        # Add reconstruction loss on shared features
        repR_Sx, _ = self.Fe(fake_y)
        X_features_loss = tf.reduce_mean(tf.abs(repR_Sx - rep_Sx))

        # Transformation from exclusive to shared features
        GT_loss = tf.reduce_mean(tf.abs(self.Gtex(rep_Ex) - rep_Sx))

        G_loss = G_gan_loss + GT_loss

        # NOTE(review): self.fake_y / self.fake_x (attributes, presumably
        # image-pool placeholders) are used in the discriminator losses
        # instead of the local fake_y / fake_x -- confirm intentional.
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           self.fake_y,
                                           use_lsgan=self.use_lsgan)

        # Y -> X: sample exclusive-feature noise matching rep_Ex statistics.
        noise = tf.random_normal(rep_Ex.shape,
                                 mean=mean_X,
                                 stddev=tf.sqrt(var_X))

        input_Fd = tf.concat([noise, rep_Sy], 3)

        fake_x = self.Fd(input_Fd)
        F_gan_loss = self.generator_loss(self.D_X,
                                         fake_x,
                                         use_lsgan=self.use_lsgan)

        repR_Sy, _ = self.Ge(fake_x)
        Y_features_loss = tf.reduce_mean(tf.abs(repR_Sy - rep_Sy))

        # Transformation from exclusive to shared features
        FT_loss = tf.reduce_mean(tf.abs(self.Ftex(rep_Ey) - rep_Sy))

        F_loss = F_gan_loss + FT_loss

        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           self.fake_x,
                                           use_lsgan=self.use_lsgan)

        # Alignment loss for autoencoders
        # (L1 reconstruction error when decoding an image from its own
        # exclusive + shared features.)
        alignment_X_loss = tf.reduce_mean(
            tf.abs(self.Fd(tf.concat([rep_Ex, rep_Sx], 3)) - x))
        alignment_Y_loss = tf.reduce_mean(
            tf.abs(self.Gd(tf.concat([rep_Sy, rep_Ey], 3)) - y))

        # Add feature reconstruction loss to alignment as they work on same var set
        A_loss = alignment_X_loss + alignment_Y_loss
        Feat_loss = X_features_loss + Y_features_loss

        # One-hot domain labels ([1,0] = X, [0,1] = Y), tiled per batch row.
        multiply = tf.constant([self.batch_size])
        dom_labels_x = tf.reshape(tf.tile(tf.constant([1.0, 0.0]), multiply),
                                  [multiply[0], 2])
        dom_labels_y = tf.reshape(tf.tile(tf.constant([0.0, 1.0]), multiply),
                                  [multiply[0], 2])
        dc_loss_x = self.domainClassifier_loss(self.DC, rep_Sx, dom_labels_x)
        dc_loss_y = self.domainClassifier_loss(self.DC, rep_Sy, dom_labels_y)

        DC_pred_X = tf.nn.softmax(self.DC(rep_Sx))
        DC_pred_Y = tf.nn.softmax(self.DC(rep_Sy))
        DC_loss = dc_loss_x + dc_loss_y

        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.Gd(input_Gd)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.Fd(input_Fd)))

        tf.summary.histogram('RepX/exc', rep_Ex)
        tf.summary.histogram('RepX/gen', rep_Sx)
        tf.summary.histogram('RepX/noise', fake_rep_Ex)

        tf.summary.histogram('RepY/exc', rep_Ey)
        tf.summary.histogram('RepY/gen', rep_Sy)
        tf.summary.histogram('RepY/noise', noise)

        tf.summary.histogram('DC/X/scoreX', DC_pred_X[:, 0])
        tf.summary.histogram('DC/X/scoreY', DC_pred_X[:, 1])
        tf.summary.histogram('DC/Y/scoreX', DC_pred_Y[:, 0])
        tf.summary.histogram('DC/Y/scoreY', DC_pred_Y[:, 1])

        tf.summary.scalar('loss/G_total', G_loss)
        tf.summary.scalar('loss/G_gan', G_gan_loss)
        tf.summary.scalar('loss/GT_loss', GT_loss)
        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F_total', F_loss)
        tf.summary.scalar('loss/F_gan', F_gan_loss)
        tf.summary.scalar('loss/FT_total', FT_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/alignment_X', alignment_X_loss)
        tf.summary.scalar('loss/alignment_Y', alignment_Y_loss)
        tf.summary.scalar('loss/DC_loss_x', dc_loss_x)
        tf.summary.scalar('loss/DC_loss_y', dc_loss_y)
        tf.summary.scalar('loss/X_features_loss', X_features_loss)
        tf.summary.scalar('loss/Y_features_loss', Y_features_loss)
        tf.summary.scalar('loss/Gnoise_gan', Gnoise_loss)
        tf.summary.scalar('loss/D_Gnoise', D_Gnoise_loss)

        tf.summary.image('X/generated',
                         utils.batch_convert2int(self.Gd(input_Gd)))

        # Fresh noise sample for a second visualization; intentionally
        # shadows noiseZ / fake_rep_Ex above (summaries only, no losses).
        noiseZ = tf.random_normal([32, 2, 2, 100], mean=0, stddev=1)
        fake_rep_Ex = self.Gnoise(noiseZ)

        tf.summary.image(
            'X/generated2',
            utils.batch_convert2int(
                self.Gd(tf.concat([rep_Sx, fake_rep_Ex], 3))))

        # swap representation, Y images
        # Slice individual examples back out as rank-4 singleton batches
        # so the decoder can mix shared features of one image with the
        # exclusive features of another.
        ex1 = tf.reshape(
            rep_Ey[0, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])
        s1 = tf.reshape(rep_Sy[0, :],
                        [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])
        ex2 = tf.reshape(
            rep_Ey[1, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])
        s2 = tf.reshape(rep_Sy[1, :],
                        [1, rep_Sy.shape[1], rep_Sy.shape[2], rep_Sy.shape[3]])

        ex3 = tf.reshape(
            rep_Ey[2, :],
            [1, rep_Ey.shape[1], rep_Ey.shape[2], rep_Ey.shape[3]])

        tf.summary.image(
            'X/im1bk2',
            utils.batch_convert2int(self.Gd(tf.concat([s1, ex2], 3))))

        tf.summary.image(
            'X/im2bk1',
            utils.batch_convert2int(self.Gd(tf.concat([s2, ex1], 3))))

        #tf.summary.image('X/sanitycheckim1bk1',
        #                utils.batch_convert2int(self.Gd(tf.concat([s1, ex1],3))))

        tf.summary.image(
            'X/im2bk3',
            utils.batch_convert2int(self.Gd(tf.concat([s2, ex3], 3))))

        tf.summary.image(
            'X/autoencoder_rec',
            utils.batch_convert2int(self.Fd(tf.concat([rep_Ex, rep_Sx], 3))))
        #tf.summary.image('X/exclusive_rec',
        #utils.batch_convert2int(self.Gdex(rep_Ex)))

        tf.summary.image('Y/generated',
                         utils.batch_convert2int(self.Fd(input_Fd)))
        tf.summary.image('Y/autoencoder_rec',
                         utils.batch_convert2int(
                             self.Gd(tf.concat([rep_Sy, rep_Ey], 3))),
                         max_outputs=3)
        # swap representation, X images
        ex1X = tf.reshape(
            rep_Ex[0, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s1X = tf.reshape(
            rep_Sx[0, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])
        ex2X = tf.reshape(
            rep_Ex[1, :],
            [1, rep_Ex.shape[1], rep_Ex.shape[2], rep_Ex.shape[3]])
        s2X = tf.reshape(
            rep_Sx[1, :],
            [1, rep_Sx.shape[1], rep_Sx.shape[2], rep_Sx.shape[3]])

        tf.summary.image(
            'Y/im1bk2',
            utils.batch_convert2int(self.Fd(tf.concat([ex2X, s1X], 3))))

        tf.summary.image(
            'Y/im2bk1',
            utils.batch_convert2int(self.Fd(tf.concat([ex1X, s2X], 3))))

        tf.summary.image(
            'Y/im2bkg0',
            utils.batch_convert2int(
                self.Fd(tf.concat([tf.zeros(ex1X.shape), s2X], 3))))

        # tf.summary.image('Y/exclusive_rec',
        #utils.batch_convert2int(self.Fdex(rep_Ey)))

        # Noise visualization
        #tf.summary.image('ZExclRep/Xgenerated', utils.batch_convert2fmint(rep_Ex,16),max_outputs=16)
        #tf.summary.image('ZExclRep/Xnoise', utils.batch_convert2fmint(noise2,16),max_outputs=16)
        #tf.summary.image('ZExclRep/Ygenerated', utils.batch_convert2fmint(rep_Ey,16),max_outputs=16)
        #tf.summary.image('ZExclRep/Ynoise', utils.batch_convert2fmint(noise,16),max_outputs=16)
        #tf.summary.image('ZSharedRep/X', utils.batch_convert2fmint(rep_Sx,32),max_outputs=4)
        #tf.summary.image('ZSharedRep/Y', utils.batch_convert2fmint(rep_Sy,32),max_outputs=4)

        return G_loss, D_Y_loss, F_loss, D_X_loss, A_loss, Feat_loss, DC_loss, Gnoise_loss, D_Gnoise_loss, fake_y, fake_x
Пример #28
0
  def model(self):
    """Build the shared/exclusive-feature translation training graph.

    X -> Y: encode x into shared (rep_Sx) and exclusive (rep_Ex) features,
    replace the exclusive part with Gaussian noise and decode with Gd;
    Y -> X mirrors this with Fe/Fd. Losses combine GAN terms, cycle
    reconstruction, autoencoder alignment, and (X side only) a reverse
    term on the decoded exclusive features.

    Returns:
        Tuple (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x).
    """
    X_reader = Reader(self.X_train_file, name='X',
        image_size=self.image_size, batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y',
        image_size=self.image_size, batch_size=self.batch_size)

    x = X_reader.feed()
    y = Y_reader.feed()


    # X -> Y
    rep_Sx, rep_Ex = self.Ge(x)
    noise = tf.random_normal(rep_Ex.shape, mean=self.meanNoise, stddev=self.stddevNoise)

    # Here, the exlusive bit comes before the shared part
    input_Gd = tf.concat([rep_Sx, noise],3)


    fake_y = self.Gd(input_Gd)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=self.use_lsgan)

    rep2_Sx, rep2_Ex = self.Fe(fake_y)
    noise = tf.random_normal(rep2_Ex.shape, mean=self.meanNoise, stddev=self.stddevNoise)

    input_Fd2 = tf.concat([noise, rep2_Sx],3)
    cycle_forward_loss = tf.reduce_mean(tf.abs(self.Fd(input_Fd2)-x))
    # BUGFIX: the autoencoder "alignment" loss must measure reconstruction
    # error against the input image; previously the `- x` term was missing,
    # so the loss only penalized the decoder output's magnitude (driving
    # reconstructions toward zero). The sibling implementation of this
    # model computes mean |Fd(concat(rep_Ex, rep_Sx)) - x|, as done here.
    alignment_X_loss = tf.reduce_mean(tf.abs(self.Fd(tf.concat([rep_Ex,
                                                                rep_Sx],3)) - x))
    # Reverse gradient layer as maximing reconstruction loss
    rev_X_loss = self.generator_loss(self.D_Y, self.Gdex(rep_Ex), use_lsgan=self.use_lsgan)


    #G_loss =  G_gan_loss + self.lambda1*cycle_forward_loss + self.lambda1*alignment_X_loss
    G_loss =  G_gan_loss + self.lambda1*cycle_forward_loss + self.lambda1*alignment_X_loss  + self.lambda1*rev_X_loss
    # NOTE(review): self.fake_y / self.fake_x (attributes, presumably
    # image-pool placeholders) are used in the discriminator losses
    # instead of the local fake_y / fake_x -- confirm intentional.
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan)

    # Y -> X
    rep_Sy, rep_Ey = self.Fe(y)
    noise = tf.random_normal(rep_Ey.shape, mean=self.meanNoise, stddev=self.stddevNoise)

    input_Fd = tf.concat([noise, rep_Sy],3)

    fake_x = self.Fd(input_Fd)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan)

    rep2_Sy, rep2_Ey = self.Ge(fake_x)
    noise = tf.random_normal(rep2_Ey.shape, mean=self.meanNoise, stddev=self.stddevNoise)

    input_Gd2 = tf.concat([rep2_Sy, noise],3)
    cycle_backward_loss = tf.reduce_mean(tf.abs(self.Gd(input_Gd2)-y))
    # BUGFIX (see alignment_X_loss above): compare the reconstruction
    # against the input y rather than penalizing raw output magnitude.
    alignment_Y_loss = tf.reduce_mean(tf.abs(self.Gd(tf.concat([rep_Sy,
                                                                rep_Ey],3)) - y))
    F_loss = F_gan_loss + self.lambda2*cycle_backward_loss + self.lambda2*alignment_Y_loss
    #F_loss = F_gan_loss + cycle_loss + alignment_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan)

    # summary
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.Gd(input_Gd)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.Fd(input_Fd)))

    tf.summary.histogram('RepX/exc', rep_Ex)
    tf.summary.histogram('RepX/gen', rep_Sx)
    tf.summary.histogram('RepX/noise', noise)

    tf.summary.scalar('loss/G_gan', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F_gan', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle_forward',  cycle_forward_loss)
    tf.summary.scalar('loss/cycle_backward',  cycle_backward_loss)
    tf.summary.scalar('loss/alignment_X', alignment_X_loss)
    tf.summary.scalar('loss/alignment_Y', alignment_Y_loss)

    tf.summary.image('X/generated',
                     utils.batch_convert2int(self.Gd(input_Gd)))
    tf.summary.image('X/cycle_rec',
                     utils.batch_convert2int(self.Fd(input_Fd2)))
    tf.summary.image('X/autoencoder_rec',
                     utils.batch_convert2int(self.Fd(tf.concat([rep_Ex, rep_Sx],3))))
    tf.summary.image('X/exclusive_rec',
                     utils.batch_convert2int(self.Gdex(rep_Ex)))


    tf.summary.image('Y/generated', utils.batch_convert2int(self.Fd(input_Fd)))
    tf.summary.image('Y/cycle_rec',
                     utils.batch_convert2int(self.Gd(input_Gd2)))
    tf.summary.image('Y/autoencoder_rec',
                     utils.batch_convert2int(self.Gd(tf.concat([rep_Sy,
                                                                rep_Ey],3))),max_outputs=3)
    #tf.summary.image('Y/exclusive_rec',
                     #utils.batch_convert2int(self.Fdex(rep_Ex)))

    #pdb.set_trace()
    # Feature-map visualizations of shared and exclusive representations.
    tf.summary.image('Gen/X',
                     utils.batch_convert2fmint(rep_Sx,128),max_outputs=10)

    tf.summary.image('Ex/X',
                     utils.batch_convert2fmint(rep_Ex,64),max_outputs=10)

    tf.summary.image('Gen/Y',
                     utils.batch_convert2fmint(rep_Sy,128),max_outputs=10)

    tf.summary.image('Ex/Y',
                     utils.batch_convert2fmint(rep_Ey,64),max_outputs=10)




    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Пример #29
0
 def sample(self, input):
     """Translate *input* with this network and return an encoded JPEG.

     Runs the network via __call__, converts the float batch back to
     uint8 pixels, drops the leading batch dimension (which must be of
     size 1, or squeeze raises), and JPEG-encodes the single image.

     Returns:
         A scalar string tensor containing the JPEG bytes.
     """
     generated = self.__call__(input)
     pixels = utils.batch_convert2int(generated)
     # encode_jpeg expects a single HWC image, so strip the batch axis.
     return tf.image.encode_jpeg(tf.squeeze(pixels, [0]))
Пример #30
0
    def model(self):
        """Build a standard CycleGAN training graph with extra diagnostics.

        Reads a batch from each domain, forms the usual GAN + cycle losses
        for generators G (X -> Y) and F (Y -> X), and additionally logs
        MSE round-trip losses and min/mean/max statistics as summaries.

        Returns:
            Tuple (G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x).
        """
        X_reader = Reader(self.X_train_file,
                          name='X',
                          image_size=self.image_size,
                          batch_size=self.BATCH_SIZE)
        Y_reader = Reader(self.Y_train_file,
                          name='Y',
                          image_size=self.image_size,
                          batch_size=self.BATCH_SIZE)

        x = X_reader.feed()
        y = Y_reader.feed()

        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
        # X -> Y
        fake_y = self.G(x)
        G_gan_loss = self.generator_loss(self.D_Y,
                                         fake_y,
                                         use_lsgan=self.use_lsgan)
        G_loss = G_gan_loss + cycle_loss
        D_Y_loss = self.discriminator_loss(self.D_Y,
                                           y,
                                           fake_y,
                                           use_lsgan=self.use_lsgan)
        # Round-trip MSE, logged below for monitoring only -- it is not
        # included in any of the returned training losses.
        lsq_y_loss = self.mse_loss(y, self.G(self.F(y)))
        # Y -> X
        fake_x = self.F(y)
        #fake_x = self.phys(y)
        F_gan_loss = self.generator_loss(self.D_X,
                                         fake_x,
                                         use_lsgan=self.use_lsgan)
        F_loss = F_gan_loss + cycle_loss
        D_X_loss = self.discriminator_loss(self.D_X,
                                           x,
                                           fake_x,
                                           use_lsgan=self.use_lsgan)
        lsq_x_loss = self.mse_loss(x, self.F(self.G(x)))
        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
        tf.summary.histogram('D_X/true', self.D_X(x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))

        tf.summary.scalar('loss/lsq_x_loss', lsq_x_loss)
        tf.summary.scalar('loss/lsq_y_loss', lsq_y_loss)
        # NOTE(review): the '*_fakex' tags below log statistics of fake_y
        # (= G(x)); the tag names suggest fake_x was intended -- confirm.
        tf.summary.scalar('loss/mean_x', tf.reduce_mean(x))
        tf.summary.scalar('loss/mean_fakex', tf.reduce_mean(fake_y))
        tf.summary.scalar('loss/max_x', tf.reduce_max(x))
        tf.summary.scalar('loss/max_fakex', tf.reduce_max(fake_y))
        tf.summary.scalar('loss/min_x', tf.reduce_min(x))
        tf.summary.scalar('loss/min_fakex', tf.reduce_min(fake_y))

        tf.summary.scalar('loss/D_Y', D_Y_loss)
        tf.summary.scalar('loss/F', F_gan_loss)
        tf.summary.scalar('loss/G', G_gan_loss)
        tf.summary.scalar('loss/D_X', D_X_loss)
        tf.summary.scalar('loss/cycle', cycle_loss)

        tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
        tf.summary.image('X/reconstruction',
                         utils.batch_convert2int(self.F(self.G(x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
        tf.summary.image('Y/reconstruction',
                         utils.batch_convert2int(self.G(self.F(y))))

        return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
Пример #31
0
 def sample(self, input):
   """Run the network on *input* and return the result as JPEG bytes.

   The batch dimension must be of size 1; it is squeezed away before
   encoding, so larger batches raise at graph-execution time.
   """
   converted = utils.batch_convert2int(self.__call__(input))
   single_image = tf.squeeze(converted, [0])
   return tf.image.encode_jpeg(single_image)
Пример #32
0
    def model(self):

        # Import the compressed train datas
        self.reader_set = [
            Reader(self.train_file,
                   name='%s/train_%d' % (labels[i], i),
                   image_size=self.image_size,
                   batch_size=self.batch_size).feed()
            for i in xrange(self.number_domain)
        ]

        # Computing cycle losses for number_domain, which totally have number_domain - 1 items
        self.cycle_loss_set = [
            self.cycle_consistency_loss(special_domain=i + 1)
            for i in xrange(self.number_domain - 1)
        ]

        # Computing adversarial losses for number_domain
        self.loss = [
            self.gan_cycle_loss(special_domain=i + 1)
            for i in xrange(self.number_domain - 1)
        ]
        # Note that 'x' points to the anchor, 'y'
        self.G_y_set = sum(
            [self.loss[i][0] for i in xrange(self.number_domain - 1)])
        self.D_y_set = sum(
            [self.loss[i][1] for i in xrange(self.number_domain - 1)])
        self.F_x_set = sum(
            [self.loss[i][2] for i in xrange(self.number_domain - 1)])
        self.D_x_set = sum([
            self.loss[i][3] for i in xrange(self.number_domain - 1)
        ])  #This is big question
        # cycle_loss_twin = self.cycle_consistency_loss(self.G, self.F, x, z,enconder_name ='twin_enconder_1',deconder_name='twin_deconder_1')

        #   ################################################################################################################
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[1]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_0')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[2]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_1')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[3]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_2')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[4]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_3')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[5]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_4')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[6]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_5')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[7]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_6')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[8]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_7')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[9]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_8')))
        tf.summary.image(
            '%s/generated_%s' % (labels[0], labels[10]),
            utils.batch_convert2int(
                self.G(self.reader_set[0], deconder_name='twin_deconder_9')))
        tf.summary.image('%s/generated_%s' % (labels[0], 'mask'),
                         utils.batch_convert2int(self.G.FCN_mask))
        self.raw_image_generated_images0 = [
            self.G(self.reader_set[0], deconder_name='twin_deconder_%d' % i)
            for i in xrange(10)
        ]
        self.raw_image_generated_images0.insert(0, self.reader_set[0])
        #   tf.summary.image('X/reconstruction_from_Y', utils.batch_convert2int(self.F(G_x)))
        #   tf.summary.image('X/reconstruction_from_former_features_to_Y', utils.batch_convert2int(F_x))
        #   tf.summary.image('X/reconstruction_from_Z', utils.batch_convert2int(self.F(G_from_x_to_z, enconder_name ='twin_enconder_1')))
        #   tf.summary.image('X/reconstruction_from_former_features_to_Z', utils.batch_convert2int(F_x_z))
        #  # tf.summary.image('X/generated_Y', utils.batch_convert2int(self.G(x)))
        #  # tf.summary.image('X/reconstruction_from_Y', utils.batch_convert2int(self.F(self.G(x))))
        #  # tf.summary.image('X/reconstruction_from_former_features_to_Y', utils.batch_convert2int(F_x))
        #   ################################################################################################################
        #
        #   ################################################################################################################
        F_y, F_features = self.F(self.reader_set[1],
                                 output_media_features=True)
        tf.summary.image('%s/generated_%s' % (labels[1], labels[0]),
                         utils.batch_convert2int(F_y))

        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[2]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_1'
                       )  # F_y actually doesnot work
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[3]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_2'
                       )  # F_y actually doesnot work
            ))

        # Decode the F-encoded domain-1 batch with all ten twin decoders
        # (indices 0-9); the list starts with the encoder output F_y itself,
        # giving 11 entries total.  NOTE: `xrange` => Python 2 code.
        self.raw_image_generated_images1 = [F_y] + [
            self.G(F_y,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        # Slot 1 (this block's own domain index) is overwritten with the raw
        # reader image, replacing the 'twin_deconder_0' output that was there.
        self.raw_image_generated_images1[1] = self.reader_set[1]
        # Remaining summaries: tag labels[k] is produced by decoder index k-1.
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[4]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_3'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[5]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_4'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[6]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_5'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[7]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_6'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[8]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_7'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[9]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_8'
                       )  # per original author's note, F_y is ignored here
            ))
        tf.summary.image(
            '%s/generated_%s' % (labels[1], labels[10]),
            utils.batch_convert2int(
                self.G(F_y,
                       use_media_features_from_former_network=F_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_9'
                       )  # per original author's note, F_y is ignored here
            ))

        #   tf.summary.image('Y/generated_X', utils.batch_convert2int(F_y))
        #   tf.summary.image('Y/reconstruction_from_X', utils.batch_convert2int(self.G(F_y)))
        #   tf.summary.image('Y/reconstruction_from_former_features', utils.batch_convert2int(G_y))
        #   ################################################################################################################
        #
        # Encode the domain-2 batch with F's twin encoder #1; also capture the
        # intermediate ("media") feature maps so G can reuse them below.
        F_z, F_z_features = self.F(self.reader_set[2],
                                   output_media_features=True,
                                   enconder_name='twin_enconder_1')
        tf.summary.image('%s/generated_%s' % (labels[2], labels[0]),
                         utils.batch_convert2int(F_z))

        #   G_x_from_z = self.G(F_z,use_media_features_from_former_network =F_z_features,use_media_features = True,deconder_name='twin_deconder_1') # F_z actually doesnot work
        tf.summary.image(
            '%s/generated_%s' % (labels[2], labels[1]),
            utils.batch_convert2int(
                self.G(F_z,
                       use_media_features_from_former_network=F_z_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_0'
                       )  # per original author's note, F_z is ignored here
            ))

        # Summary for domain 2 decoded through 'twin_deconder_2'
        # (tag pairs labels[2] with labels[3]).
        tf.summary.image(
            '%s/generated_%s' % (labels[2], labels[3]),
            utils.batch_convert2int(
                self.G(F_z,
                       use_media_features_from_former_network=F_z_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_2'
                       )  # per original author's note, F_z is ignored here
            ))
        # 11-entry list: encoder output F_z plus all ten twin-decoder outputs.
        self.raw_image_generated_images2 = [F_z] + [
            self.G(F_z,
                   use_media_features_from_former_network=F_z_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        # Slot 2 (this domain's own index) gets the raw reader image.
        self.raw_image_generated_images2[2] = self.reader_set[2]
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[4]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_3') # F_z actually doesnot work
        #    ))
        #
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[5]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_4') # F_z actually doesnot work
        #    ))
        #
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[6]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_5') # F_z actually doesnot work
        #    ))
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[7]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_6') # F_z actually doesnot work
        #    ))
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[8]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_7') # F_z actually doesnot work
        #    ))
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[9]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_8') # F_z actually doesnot work
        #    ))
        #tf.summary.image('%s/generated_%s'%(labels[2],labels[10]), utils.batch_convert2int(
        #    self.G(F_z,use_media_features_from_former_network =F_z_features,
        #        use_media_features = True,deconder_name='twin_deconder_9') # F_z actually doesnot work
        #    ))
        #   tf.summary.image('Z/generated_X', utils.batch_convert2int(F_z))
        #   tf.summary.image('Z/reconstruction_from_X', utils.batch_convert2int(self.G(F_z,deconder_name='twin_deconder_1')))
        ##   tf.summary.image('Z/reconstruction_from_former_features', utils.batch_convert2int(G_x_from_z))
        #  tf.summary.image('train_2/generated_y', utils.batch_convert2int(G_y_from_z))
        #  tf.summary.image('train_2/generated_y_then_x', utils.batch_convert2int(self.F(G_y_from_z)))
        ##
        #  tf.summary.image('train_2/generated_W', utils.batch_convert2int(G_w_2))
        ##   #return G_loss+G_loss_Z, D_Y_loss+ D_Z_loss, F_loss+F_loss_Z, D_X_loss+D_X_loss_from_Z, fake_y, fake_x,fake_z
        # Encode the domain-3 batch with F's twin encoder #2 (encoder index =
        # domain index - 1), again capturing intermediate features for G.
        F_h, F_h_features = self.F(self.reader_set[3],
                                   output_media_features=True,
                                   enconder_name='twin_enconder_2')
        tf.summary.image('%s/generated_%s' % (labels[3], labels[0]),
                         utils.batch_convert2int(F_h))

        #   G_x_from_z = self.G(F_z,use_media_features_from_former_network =F_z_features,use_media_features = True,deconder_name='twin_deconder_1') # F_z actually doesnot work
        tf.summary.image(
            '%s/generated_%s' % (labels[3], labels[1]),
            utils.batch_convert2int(
                self.G(F_h,
                       use_media_features_from_former_network=F_h_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_0'
                       )  # per original author's note, F_h is ignored here
            ))

        tf.summary.image(
            '%s/generated_%s' % (labels[3], labels[2]),
            utils.batch_convert2int(
                self.G(F_h,
                       use_media_features_from_former_network=F_h_features,
                       use_media_features=True,
                       deconder_name='twin_deconder_1'
                       )  # per original author's note, F_h is ignored here
            ))
        # 11-entry list: encoder output plus all ten twin-decoder outputs,
        # with slot 3 (this domain's own index) replaced by the raw image.
        self.raw_image_generated_images3 = [F_h] + [
            self.G(F_h,
                   use_media_features_from_former_network=F_h_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images3[3] = self.reader_set[3]

        # Domains 4-10 repeat the same pattern (no image summaries here):
        # encode reader_set[d] with 'twin_enconder_{d-1}', build an 11-entry
        # list of [encoder output] + outputs of all ten twin decoders, then
        # overwrite slot d with the raw reader image for that domain.
        # F_ / F_features are deliberately reused scratch names.
        F_, F_features = self.F(self.reader_set[4],
                                output_media_features=True,
                                enconder_name='twin_enconder_3')
        self.raw_image_generated_images4 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images4[4] = self.reader_set[4]

        F_, F_features = self.F(self.reader_set[5],
                                output_media_features=True,
                                enconder_name='twin_enconder_4')
        self.raw_image_generated_images5 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images5[5] = self.reader_set[5]

        F_, F_features = self.F(self.reader_set[6],
                                output_media_features=True,
                                enconder_name='twin_enconder_5')
        self.raw_image_generated_images6 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images6[6] = self.reader_set[6]

        F_, F_features = self.F(self.reader_set[7],
                                output_media_features=True,
                                enconder_name='twin_enconder_6')
        self.raw_image_generated_images7 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images7[7] = self.reader_set[7]

        F_, F_features = self.F(self.reader_set[8],
                                output_media_features=True,
                                enconder_name='twin_enconder_7')
        self.raw_image_generated_images8 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images8[8] = self.reader_set[8]

        F_, F_features = self.F(self.reader_set[9],
                                output_media_features=True,
                                enconder_name='twin_enconder_8')
        self.raw_image_generated_images9 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images9[9] = self.reader_set[9]
        F_, F_features = self.F(self.reader_set[10],
                                output_media_features=True,
                                enconder_name='twin_enconder_9')
        self.raw_image_generated_images10 = [F_] + [
            self.G(F_,
                   use_media_features_from_former_network=F_features,
                   use_media_features=True,
                   deconder_name='twin_deconder_%d' % i) for i in xrange(10)
        ]
        self.raw_image_generated_images10[10] = self.reader_set[10]

        # Flatten the 11 per-domain lists (each 11 entries) into one flat
        # list of 121 tensors; presumably consumed elsewhere for evaluation /
        # image dumps -- TODO confirm against callers.
        # (raw_image_generated_images0 is built before this visible section.)
        self.raw_image_generated_images = self.raw_image_generated_images0+self.raw_image_generated_images1+self.raw_image_generated_images2+self.raw_image_generated_images3+self.raw_image_generated_images4+ \
                                            self.raw_image_generated_images5+self.raw_image_generated_images6+self.raw_image_generated_images7+self.raw_image_generated_images8+self.raw_image_generated_images9+ \
                self.raw_image_generated_images10

        # Save trainable variables
        # Build the scope names to EXCLUDE from G's trainable set:
        # 'twin_enconder_1' .. 'twin_enconder_{number_domain-2}'.
        # NOTE(review): G is filtered against *enconder* scopes and F against
        # *deconder* scopes below -- verify this matches which network actually
        # owns each scope.
        G_not_use_var = [
            'twin_enconder_%d' % (i + 1)
            for i in xrange(self.number_domain - 2)
        ]
        # NOTE(review): the fixed slice v.name[2:17] assumes a 2-character
        # scope prefix (e.g. 'G/') followed by a 15-character scope name
        # ('twin_enconder_N' with single-digit N).  For N >= 10 the scope name
        # is 16 characters and can never match, so those variables would stay
        # trainable -- confirm number_domain <= 11 or fix the comparison.
        self.G_train_var = [
            v for v in self.G.variables if v.name[2:17] not in G_not_use_var
        ]

        # Same exclusion pattern for F, over the twin decoder scopes.
        F_not_use_var = [
            'twin_deconder_%d' % (i + 1)
            for i in xrange(self.number_domain - 2)
        ]
        self.F_train_var = [
            v for v in self.F.variables if v.name[2:17] not in F_not_use_var
        ]
        #
        # Per-domain generator/discriminator op sets built earlier in this
        # method (before the visible section).
        return self.G_y_set, self.D_y_set, self.F_x_set, self.D_x_set