Example #1
    def _predict_drgan_multipie(self, reuse=False):
        '''
        Build the network training graph.
        :param reuse: True | False
        :return:
        '''
        with tf.device('/gpu:1'):
            self.softresult, self.logits_all, self.encode = nets.inference_recognition(
                self.batch_data, classnums=self.class_nums, reuse=reuse)
            self.logits = tf.split(self.logits_all, 2, axis=0)[0]
            # self.logits, self.gt_logits = tf.split(self.logits, 2, axis=0)
            # self.encode, self.gt_encode = tf.split(self.encode, 2, axis=0)
            # self.softresult, self.gt_softresult = tf.split(self.softresult, 2, axis=0)
            # syn1: append a random noise vector z to the identity encoding
            noise = tf.random_uniform(shape=(self.batch_size * self.gpus_count, self.noise_z),
                                      minval=-1, maxval=1, dtype=tf.float32,
                                      name='input_noise')
            self.encode_add_z = tf.concat([self.encode, noise], 1)

            # self.gt_encode_add_z=tf.concat([self.gt_encode,noise],1)
        with tf.device('/gpu:2'):
            self.output_syn_all = nets.netG_deconder_gamma(self.encode_add_z, self.output_channel, reuse=reuse)
            self.output_syn, self.output_gt_syn = tf.split(self.output_syn_all, 2, axis=0)
            # self.output_gt_syn = nets.netG_deconder_gamma(self.gt_encode_add_z, self.output_channel, reuse=True)
        # with tf.device('/gpu:2'):
            # discriminator logits for the real (ground-truth) and synthesized images
            self.real_logits = nets.Custom_netD_discriminator_adloss(self.gt_input_data, reuse=reuse)
            self.fake_logits = nets.Custom_netD_discriminator_adloss(self.output_syn, reuse=True)
        with tf.device('/gpu:1'):
            # identity features of the real images (profile and frontal halves)
            self.pidprofile_content, self.pidr_content = tf.split(self.encode, 2, axis=0)

            # features of the synthesized images
            self.fake_softmax, self.fake_logits_all, self.fake_content \
                = nets.inference_recognition(self.output_syn_all, classnums=self.class_nums, reuse=True)
            self.pidf_content, self.fake_gt_content = tf.split(self.fake_content, 2, axis=0)
            self.f_logits = tf.split(self.fake_logits_all, 2, axis=0)[0]

            # compute the cosine similarity between feature pairs
            self.cosine_real=tf.divide(tf.reduce_sum(tf.multiply(self.pidprofile_content,self.pidr_content),axis=1),
                                       tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(self.pidr_content),axis=1)),
                                                   tf.sqrt(tf.reduce_sum(tf.square(self.pidprofile_content),axis=1))))
            self.cosine_syn=tf.divide(tf.reduce_sum(tf.multiply(self.pidf_content,self.fake_gt_content),axis=1),
                                       tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(self.pidf_content),axis=1)),
                                                   tf.sqrt(tf.reduce_sum(tf.square(self.fake_gt_content),axis=1))))

            self.cosine_real_residual=tf.divide(tf.reduce_sum(tf.multiply(self.pidprofile_content[1:,:],self.pidr_content[:-1,:]),axis=1),
                                       tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(self.pidr_content[:-1,:]),axis=1)),
                                                   tf.sqrt(tf.reduce_sum(tf.square(self.pidprofile_content[1:,:]),axis=1))))
            self.cosine_syn_residual=tf.divide(tf.reduce_sum(tf.multiply(self.pidf_content[1:,:],self.fake_gt_content[:-1,:]),axis=1),
                                       tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(self.pidf_content[1:,:]),axis=1)),
                                                   tf.sqrt(tf.reduce_sum(tf.square(self.fake_gt_content[:-1,:]),axis=1))))
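The cosine terms above can be written more compactly by normalising the feature rows first. A minimal sketch (TF 1.x, assuming tf.nn.l2_normalize accepts an axis argument; older releases use dim), equivalent to the divide/sqrt formulation used in the method:

import tensorflow as tf

def cosine_similarity(a, b):
    # Row-wise cosine similarity between two [batch, dim] feature tensors.
    a_n = tf.nn.l2_normalize(a, axis=1)
    b_n = tf.nn.l2_normalize(b, axis=1)
    return tf.reduce_sum(a_n * b_n, axis=1)

# e.g. self.cosine_real = cosine_similarity(self.pidprofile_content, self.pidr_content)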
Example #2
    def _init_model(self):
        '''
        Initialise the model for training.
        :return:
        '''
        # tf.set_random_seed(20)
        # with tf.Graph().as_default():

        self.global_step = slim.get_or_create_global_step()
        self.batch_data = tf.placeholder(dtype=tf.float32,shape=[None,self.input_size,self.input_size,self.input_channel],name='input_images')#image
        self.batch_label = tf.placeholder(dtype= tf.int64,shape=[None],name='input_labels')#label
        # make one-hot labels
        self.labels = slim.one_hot_encoding(self.batch_label, self.class_nums)
        # compute the recognition loss
        self.softmax_real, self.logits, self.fc = nets.inference_recognition(self.batch_data, self.class_nums)
        self.loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=self.labels,logits=self.logits
        ))
        lo=tf.summary.scalar('train/pre_loss',self.loss)
        reshape_R = tf.reshape(self.softmax_real, [-1, self.class_nums])
        max_r = tf.argmax(reshape_R, 1)
        self.predict_rate = tf.equal(max_r, self.batch_label)
        self.accuracy_r = tf.reduce_mean(tf.cast(self.predict_rate, tf.float32))
        acc=tf.summary.scalar('train/pre_rate',self.accuracy_r )
        self.summary_train = tf.summary.merge([lo,acc])

        train_vars = tf.trainable_variables()
        self.fc_add = [var for var in train_vars if 'recognition_fc' in var.name]
        self.vard_fr= [var for var in train_vars if 'resnet_yd' in var.name]
        self.init_vars=self.vard_fr
        self.var_all=self.vard_fr+self.fc_add
        train_optimizer=tf.train.MomentumOptimizer(learning_rate=0.0001,momentum=0.99,name='optimizer')
        self.train_op=train_optimizer.minimize(self.loss,var_list=self.var_all,global_step=self.global_step)
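A possible driver loop for this graph might look as follows; the session setup, the model instance, and the next_batch() data helper are assumptions for illustration, not part of the original class:

import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./logs', sess.graph)
    for step in range(10000):
        images, labels = next_batch()  # hypothetical data helper
        _, loss_val, summary = sess.run(
            [model.train_op, model.loss, model.summary_train],
            feed_dict={model.batch_data: images, model.batch_label: labels})
        writer.add_summary(summary, global_step=step)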
Example #3
 def _init_validation_model(self):
     '''
     Initialise the model for identity features and slerp codes.
     :return:
     '''
     with tf.name_scope('test'):
         _,_,self.encode_slerp = nets.inference_recognition(self.input_data,self.class_nums, reuse=True)
         self.encode_slerp_z=tf.get_variable('code',[3,562])
         self.image_syn_slerp = nets.netG_deconder_gamma(self.encode_slerp_z, self.output_channel, reuse=True)
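The 3x562 encode_slerp_z variable is filled outside this graph; a common way to build such codes is spherical linear interpolation (slerp) between two encodings. A NumPy sketch under that assumption:

import numpy as np

def slerp(low, high, val):
    # Spherical linear interpolation between two 1-D latent vectors.
    low_n = low / np.linalg.norm(low)
    high_n = high / np.linalg.norm(high)
    omega = np.arccos(np.clip(np.dot(low_n, high_n), -1.0, 1.0))
    if np.isclose(omega, 0.0):
        return (1.0 - val) * low + val * high  # nearly parallel: fall back to lerp
    return (np.sin((1.0 - val) * omega) * low + np.sin(val * omega) * high) / np.sin(omega)

# e.g. three interpolated codes to feed into encode_slerp_z:
# codes = np.stack([slerp(code_a, code_b, t) for t in (0.0, 0.5, 1.0)])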
Example #4
 def _init_validation_model(self):
     '''
     Initialise the model for identity features and slerp codes.
     :return:
     '''
     # slim.assign_from_checkpoint()
     with tf.name_scope('test'):
         self.batch_data_test = tf.placeholder(dtype=tf.float32,
                                          shape=[None, self.input_size, self.input_size, self.input_channel],
                                          name='input_images_test')  # image
         self.batch_label_test = tf.placeholder(dtype=tf.int64, shape=[None], name='input_labels_test')  # label
         self.labels_test = slim.one_hot_encoding(self.batch_label_test,self.class_nums)
         # compute the test loss
         self.softmax_real_test,self.logits_test,self.fc_test=nets.inference_recognition(self.batch_data_test,self.class_nums,reuse=True)
         self.loss_test=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
             labels=self.labels_test,logits=self.logits_test
         ))
         loss_test=tf.summary.scalar('test/pre_loss_test',self.loss_test)
         reshape_R = tf.reshape(self.softmax_real_test, [-1, self.class_nums])
         max_r = tf.argmax(reshape_R, 1)
         self.predict_rate_test = tf.equal(max_r, self.batch_label_test)
         self.accuracy_r_test = tf.reduce_mean(tf.cast(self.predict_rate_test, tf.float32))
         pre_test=tf.summary.scalar('test/pre_rate_test', self.accuracy_r_test)
         self.summary_test = tf.summary.merge([loss_test,pre_test])
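An evaluation step over a validation batch could then run the test accuracy and merged summary together; the sess, model, test_writer, and validation batch names below are assumptions for illustration:

acc, summary = sess.run(
    [model.accuracy_r_test, model.summary_test],
    feed_dict={model.batch_data_test: val_images,
               model.batch_label_test: val_labels})
test_writer.add_summary(summary, global_step=step)
print('validation accuracy: %.4f' % acc)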
Example #5
 def _predict_drgan_multipie(self, reuse=False):
      '''
      Build the network training graph.
      :param reuse: True | False
      :return:
      '''
     self.softresult, self.logits_cl, self.encode_fr = nets.inference_recognition(
         self.batch_data, classnums=self.class_nums, reuse=reuse)
     noise = tf.random_uniform(shape=(self.batch_size, self.noise_z),
                               minval=-1,
                               maxval=1,
                               dtype=tf.float32,
                               name='input_noise')
     self.encode_add_z = tf.concat([self.encode_fr, noise], 1)
     self.output_syn_middle = nets.netG_deconder_gamma(self.encode_add_z,
                                                       self.output_channel,
                                                       reuse=reuse)
     self.output_syn_middle_profile, self.output_syn_middle_front = tf.split(
         self.output_syn_middle, 2, axis=0)
     self.output_syn_front = nets.merge_net_16_unet(self.output_syn_middle,
                                                    self.batch_data,
                                                    reuse=reuse)
     self.identity_real, _ = inference(self.batch_data,
                                       keep_prob=1,
                                       phase_train=False)
     self.identity_syn, _ = inference(self.output_syn_front,
                                      keep_prob=1,
                                      phase_train=False,
                                      reuse=True)
     self.output_syn, self.output_gt_syn = tf.split(self.output_syn_front,
                                                    2,
                                                    axis=0)
     self.real_logits = nets.Custom_netD_discriminator_adloss(
         self.gt_input_data, reuse=reuse)
     self.fake_logits = nets.Custom_netD_discriminator_adloss(
         self.output_syn, reuse=True)
     self.profile_content, self.front_content = tf.split(self.encode_fr,
                                                         2,
                                                         axis=0)
      # features of the synthesized images
      self.syn_softmax, self.fake_logits_all, self.syn_encode \
          = resnet_yd(self.output_syn_front, reuse=True)
     self.syn_content, self.syn_front_content = tf.split(self.syn_encode,
                                                         2,
                                                         axis=0)
      # compute the cosine similarity between feature pairs
     self.cosine_real = tf.divide(
         tf.reduce_sum(tf.multiply(self.profile_content,
                                   self.front_content),
                       axis=1),
         tf.multiply(
             tf.sqrt(tf.reduce_sum(tf.square(self.front_content), axis=1)),
             tf.sqrt(tf.reduce_sum(tf.square(self.profile_content),
                                   axis=1))))
     self.cosine_syn = tf.divide(
         tf.reduce_sum(tf.multiply(self.syn_content,
                                   self.syn_front_content),
                       axis=1),
         tf.multiply(
             tf.sqrt(tf.reduce_sum(tf.square(self.syn_content), axis=1)),
             tf.sqrt(
                 tf.reduce_sum(tf.square(self.syn_front_content), axis=1))))
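The adversarial losses built on top of real_logits and fake_logits are not shown in this method; a common formulation is the sigmoid cross-entropy GAN loss, sketched below as an assumption rather than the repository's exact objective (real_logits / fake_logits stand in for self.real_logits / self.fake_logits):

import tensorflow as tf

d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=real_logits, labels=tf.ones_like(real_logits)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=fake_logits, labels=tf.zeros_like(fake_logits)))
d_loss = d_loss_real + d_loss_fake  # discriminator objective
g_adv_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=fake_logits, labels=tf.ones_like(fake_logits)))  # generator's adversarial term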