Example #1
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.x2 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)
        norm_x2 = 2 * (tf.cast(self.x2, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(self.arch)
    
        # Encode and decode the first image (original stage)
        self.z1 = self.__Enc(norm_x1)
        self.x_out1 = self.__Dec(self.z1)

        # Encode the second image
        self.z2 = self.__Enc(norm_x2)

        # Split each code into (foreground, background) halves and cross-combine them
        z1_part1, z1_part2 = tf.split(self.z1, 2, axis=1)
        z2_part1, z2_part2 = tf.split(self.z2, 2, axis=1)
        x2fg_x1bg = tf.concat([z2_part1, z1_part2], axis=1)
        x1fg_x2bg = tf.concat([z1_part1, z2_part2], axis=1)

        # Decode the swapped codes
        self.x2fg_x1bg_out = self.__Dec(x2fg_x1bg)
        self.x1fg_x2bg_out = self.__Dec(x1fg_x2bg)

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1)
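
The graph above only wires up the tf.split/tf.concat recombination; as a minimal standalone sketch of what that recombination does, the following NumPy snippet (the code size and values are assumptions, not taken from the source) swaps the halves of two latent codes in the same way:

import numpy as np

# Toy latent codes: batch of 1, 8 dims each; by assumption the first half is
# the "foreground" part and the second half the "background" part.
z1 = np.arange(8, dtype=np.float32).reshape(1, 8)
z2 = -np.arange(8, dtype=np.float32).reshape(1, 8)

z1_part1, z1_part2 = np.split(z1, 2, axis=1)
z2_part1, z2_part2 = np.split(z2, 2, axis=1)

# Cross-combine, mirroring the tf.concat calls above.
x2fg_x1bg = np.concatenate([z2_part1, z1_part2], axis=1)
x1fg_x2bg = np.concatenate([z1_part1, z2_part2], axis=1)

print(x2fg_x1bg)   # foreground of image 2, background of image 1
print(x1fg_x2bg)   # foreground of image 1, background of image 2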
Example #2
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32,
                                 shape=[None] + list(self.image_shape))
        self.gt0 = tf.placeholder(tf.float32,
                                  shape=[None] +
                                  list([self.latent_num + 9]))  # label

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(
            self.arch)

        # Encode and decode the original image
        self.z1 = self.__Enc(norm_x1)
        self.x_out1 = self.__Dec(self.z1)

        # Split the latent representation into two halves and classify each half
        r_part1, r_part2 = tf.split(self.z1, 2, axis=1)

        c_p0 = self.__Classifier(r_part1)
        c_p1 = self.__Classifier(r_part2)

        print("c_p1.shape")
        print(c_p1.shape)

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1, c_p0, c_p1)
Example #3
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.label = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num * (self.latent_num + 1)]))
        self.mask = tf.placeholder(tf.float32, shape=[None] + list([self.latent_dim]))
        self.mask_zero = tf.placeholder(tf.float32, shape=[None] + list([self.latent_dim]))
        self.img_black = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        # classification ground truth
        self.gt0 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 1]))
        gts0 = self.gt0

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)
        norm_img_black = 2 * (tf.cast(self.img_black, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(self.arch)
    
        # Encode and decode the original image
        self.z1 = self.__Enc(norm_x1)
        self.x_out1 = self.__Dec(self.z1)
        # Randomly zero part of the code via the mask, then decode
        self.r1 = tf.multiply(self.z1, self.mask)
        self.x_out_r0 = self.__Dec(self.r1)

        # Zero the whole representation and decode
        self.r_zero = tf.multiply(self.z1, self.mask_zero)
        self.img_out_zero = self.__Dec(self.r_zero)
        # Split the latent representation into 3 parts and classify each part
        _, _, r_all0 = tf.split(self.r_zero, 3, axis=1)
        r_part1, r_part2, r_part3 = tf.split(self.z1, 3, axis=1)
        gts_part1, gts_part2, gts_part3 = tf.split(self.label, 3, axis=1)

        c_p0 = self.__Classifier(r_all0)
        c_p1 = self.__Classifier(r_part1)
        c_p2 = self.__Classifier(r_part2)
        c_p3 = self.__Classifier(r_part3)
        print("gts_part1.shape:", gts_part1.shape)
        print("c_p1.shape:", c_p1.shape)

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1, norm_img_black, c_p0, c_p1, c_p2, c_p3,
                                   gts0, gts_part1, gts_part2, gts_part3)
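
The mask and mask_zero placeholders are only multiplied into the code here; how they are filled lies outside this snippet. Below is a hedged sketch of one plausible feed, assuming mask zeroes one of the three latent parts per sample and mask_zero zeroes everything (the dimensions are made up):

import numpy as np

latent_dim = 12                    # assumed; must equal self.latent_dim
batch_size = 4
part = latent_dim // 3             # the code is split into 3 equal parts above

# Zero one randomly chosen part per sample.
mask = np.ones((batch_size, latent_dim), dtype=np.float32)
dropped = np.random.randint(0, 3, size=batch_size)
for i, p in enumerate(dropped):
    mask[i, p * part:(p + 1) * part] = 0.0

# Zero the entire representation.
mask_zero = np.zeros((batch_size, latent_dim), dtype=np.float32)

# These arrays would then be passed via feed_dict to self.mask and self.mask_zero.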
Example #4
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(self.arch)
    
        # Encode and decode the original image
        self.z1 = self.__Enc(norm_x1)
        self.x_out1 = self.__Dec(self.z1)

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1)
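
The 2 * (x - .5) normalization used throughout these examples maps inputs assumed to lie in [0, 1] onto [-1, 1]; a one-line NumPy check:

import numpy as np

x = np.array([0.0, 0.25, 0.5, 1.0], dtype=np.float32)
print(2 * (x - 0.5))   # [-1., -0.5, 0., 1.]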
Example #5
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.aux1_mask = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        # auxiliary dataset
        self.aux1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.aux2 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.aux_GT1 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.aux_GT2 = tf.placeholder(tf.float32, shape=[None] + list(self.image_shape))
        self.class_gt0 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt1 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt2 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt3 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt4 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt5 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt6 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt7 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt8 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt9 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        self.class_gt10 = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))
        # one-sample labels
        self.aux_class_gt = tf.placeholder(tf.float32, shape=[None] + list([self.latent_num + 9]))

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)

        norm_aux1 = 2 * (tf.cast(self.aux1, tf.float32) - .5)
        norm_aux2 = 2 * (tf.cast(self.aux2, tf.float32) - .5)
        norm_aux_GT1 = 2 * (tf.cast(self.aux_GT1, tf.float32) - .5)
        norm_aux_GT2 = 2 * (tf.cast(self.aux_GT2, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(self.arch)
    
        # Encode and decode
        self.z1 = self.__Enc(norm_x1)
        self.x1_out = self.__Dec(self.z1)
        # auxiliary data
        self.aux_z1 = self.__Enc(norm_aux1)
        self.aux1_out = self.__Dec(self.aux_z1)
        self.aux_z2 = self.__Enc(norm_aux2)
        self.aux2_out = self.__Dec(self.aux_z2)

        # Split the auxiliary codes into (head, background) halves and swap the heads
        aux1_head, aux1_bg = tf.split(self.aux_z1, 2, axis=1)
        aux2_head, aux2_bg = tf.split(self.aux_z2, 2, axis=1)

        GT1_z = tf.concat([aux2_head, aux1_bg], axis=1)
        GT2_z = tf.concat([aux1_head, aux2_bg], axis=1)
        self.GT1_out = self.__Dec(GT1_z)
        self.GT2_out = self.__Dec(GT2_z)
        # Dual swap: put aux1's head on x1's background, re-encode, then restore x1's head
        x1_head, x1_bg = tf.split(self.z1, 2, axis=1)
        self.mix_head_out = self.__Dec(tf.concat([aux1_head, x1_bg], axis=1))
        mix_head, mix_bg = tf.split(self.__Enc(self.mix_head_out), 2, axis=1)
        x1_dual_out = self.__Dec(tf.concat([x1_head, mix_bg], axis=1))

        self.aux1_mix_head_out = self.__Dec(tf.concat([x1_head, aux1_bg], axis=1))

        # classification loss
        ## for x1
        r_part1, r_part2 = tf.split(self.z1, 2, axis=1)
        c_p0 = self.__Classifier(r_part1)
        c_p1 = self.__Classifier(r_part2)
        ## for aux1
        aux1_r_part1, aux1_r_part2 = tf.split(self.aux_z1, 2, axis=1)
        aux1_c_p0 = self.__Classifier(aux1_r_part1)
        aux1_c_p1 = self.__Classifier(aux1_r_part2)
        ## for aux2
        aux2_r_part1, aux2_r_part2 = tf.split(self.aux_z2, 2, axis=1)
        aux2_c_p0 = self.__Classifier(aux2_r_part1)
        aux2_c_p1 = self.__Classifier(aux2_r_part2)

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1, norm_aux1, norm_aux2, norm_aux_GT1, norm_aux_GT2,
                                   x1_dual_out, c_p0, c_p1, aux1_c_p0, aux1_c_p1,
                                   aux2_c_p0, aux2_c_p1)
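
The "dual swap" above decodes aux1's head on x1's background, re-encodes that mixed image, and then puts x1's head back onto the re-encoded background. The index bookkeeping can be checked in isolation with identity stand-ins for the encoder and decoder (a toy sketch; enc/dec and the 4-dimensional codes are assumptions, not the model's networks):

import numpy as np

def enc(x):  # identity stand-in for self.__Enc
    return x

def dec(z):  # identity stand-in for self.__Dec
    return z

z1 = np.array([[1., 2., 3., 4.]])          # code of x1
aux_z1 = np.array([[10., 20., 30., 40.]])  # code of aux1

x1_head, x1_bg = np.split(z1, 2, axis=1)
aux1_head, aux1_bg = np.split(aux_z1, 2, axis=1)

# First swap: aux1's head on x1's background.
mix_head_out = dec(np.concatenate([aux1_head, x1_bg], axis=1))
# Re-encode and swap back: x1's head on the mixed background.
mix_head, mix_bg = np.split(enc(mix_head_out), 2, axis=1)
x1_dual_out = dec(np.concatenate([x1_head, mix_bg], axis=1))

print(x1_dual_out)   # with identity enc/dec this recovers z1 exactly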
Example #6
    def __build_graph(self):
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        self.is_training = tf.placeholder(tf.bool)
        self.x1 = tf.placeholder(tf.float32,
                                 shape=[None] + list(self.image_shape))
        self.x_gan = tf.placeholder(tf.float32,
                                    shape=[None] + list(self.image_shape))
        self.mask = tf.placeholder(tf.float32,
                                   shape=[None] + list([self.latent_dim]))
        self.mask_zero = tf.placeholder(tf.float32,
                                        shape=[None] + list([self.latent_dim]))
        self.img_white = tf.placeholder(tf.float32,
                                        shape=[None] + list(self.image_shape))
        # classification ground truth
        self.vec_one = tf.placeholder(
            tf.float32,
            shape=[None] + list([1]))  # used in the classification loss
        self.class_gt0 = tf.placeholder(tf.float32,
                                        shape=[None] +
                                        list([self.latent_num + 1]))
        self.class_gt1 = tf.placeholder(tf.float32,
                                        shape=[None] +
                                        list([self.latent_num + 1]))
        self.class_gt2 = tf.placeholder(tf.float32,
                                        shape=[None] +
                                        list([self.latent_num + 1]))

        # Normalize 'real' input data to [-1, 1]
        norm_x1 = 2 * (tf.cast(self.x1, tf.float32) - .5)
        norm_img_white = 2 * (tf.cast(self.img_white, tf.float32) - .5)
        norm_x_gan = 2 * (tf.cast(self.x_gan, tf.float32) - .5)
        # Set encoder, decoder, classifier and GAN-discriminator architectures
        self.Encoder, self.Decoder, self.Classifier, self.gan_discriminator = NetsRetreiverWithClassifier(
            self.arch)

        # Encode and decode the original image
        self.z1 = self.__Enc(norm_x1)
        self.x_out1 = self.__Dec(self.z1)
        # Randomly zero part of the code via the mask, decode, then re-encode
        self.r1 = tf.multiply(self.z1, self.mask)
        self.x_out_r0 = self.__Dec(self.r1)
        r11 = self.__Enc(self.x_out_r0)

        # Zero the whole representation and decode
        self.r_zero = tf.multiply(self.z1, self.mask_zero)
        self.img_out_zero = self.__Dec(self.r_zero)
        # Split the latent representations into 2 parts and classify each part
        r_part1, r_part2 = tf.split(self.r1, 2, axis=1)
        _, r_all0 = tf.split(self.r_zero, 2, axis=1)

        c_p0 = self.__Classifier(r_all0)
        c_p1 = self.__Classifier(r_part1)
        c_p2 = self.__Classifier(r_part2)

        # GAN Discriminator
        self.fake_data = self.x_out_r0
        self.real_data = tf.reshape(norm_x_gan, [
            -1, self.image_shape[0] * self.image_shape[1] * self.image_shape[2]
        ])
        disc_real = self.__GAN_discriminator(
            self.real_data)  # input real image data
        disc_fake = self.__GAN_discriminator(
            self.fake_data)  # input fake image data

        # Loss and optimizer
        self.__prep_loss_optimizer(norm_x1, norm_img_white, r11, c_p0, c_p1,
                                   c_p2, disc_real, disc_fake)
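
Note that the discriminator works on flattened vectors: real_data is reshaped to [batch, H*W*C], so fake_data (the decoder output x_out_r0) is presumably produced in the same flat layout. A quick shape check with an assumed 64x64x3 image size:

import numpy as np

image_shape = (64, 64, 3)          # assumed; must match self.image_shape
batch = np.zeros((8,) + image_shape, dtype=np.float32)

real_data = batch.reshape(-1, image_shape[0] * image_shape[1] * image_shape[2])
print(real_data.shape)             # (8, 12288): one flat vector per image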