def predict_gan(self, batch_data, noise=None, pose=None):
    """Synthesize an image from *batch_data* plus a noise code.

    Pipeline: encode the input, append the noise vector to the identity
    code, decode to an intermediate image, then merge that image with the
    original input through the U-net merge network.

    :param batch_data: input image batch fed to the encoder and merge net
    :param noise: noise tensor concatenated to the identity code
    :param pose: unused in this method; kept for signature compatibility
    :return: output tensor of the merge network
    """
    # Identity features from the recognition encoder (softmax and logits
    # outputs are discarded here).
    _, _, identity_code = resnet_yd(batch_data)
    # Append the noise vector along the feature axis.
    code_with_noise = tf.concat([identity_code, noise], 1)
    # Decode to an intermediate image, then refine it against the input.
    decoded = Nets.netG_deconder_gamma(code_with_noise, self.input_channel)
    merged = Nets.merge_net_16_unet(decoded, batch_data)
    return merged
def slerp_gan(self, bath_data):
    """Build the test-time graph for identity encoding and slerp synthesis.

    All result tensors are stored on ``self``; nothing is returned.

    NOTE(review): parameter name ``bath_data`` looks like a typo for
    ``batch_data``; left unchanged so keyword callers keep working.

    :param bath_data: input image batch
    :return: None
    """
    with tf.name_scope('test'):
        # Identity code from the shared encoder (weights reused).
        _, _, self.encode_slerp = resnet_yd(bath_data, reuse=True)
        # Graph variable named 'code' holding the slerp codes that external
        # code assigns before running the synthesis ops.
        self.encode_slerp_z = tf.get_variable('code', [3, 562])
        # Decode the slerp code to a 3-channel image, reusing generator
        # weights, then refine the result against the input batch.
        self.image_syn_slerp_middle = Nets.netG_deconder_gamma(
            self.encode_slerp_z, 3, reuse=True)
        self.image_syn_slerp = Nets.merge_net_16_unet(
            self.image_syn_slerp_middle, bath_data, reuse=True)
def _init_validation_model(self):
    """Build the validation/test graph for identity encoding and synthesis.

    All result tensors are stored on ``self``; nothing is returned.

    :return: None
    """
    with tf.name_scope('test'):
        # Identity code of the validation inputs, reusing encoder weights.
        _, _, self.encode_slerp = resnet_yd(self.input_data, reuse=True)
        # _,_,self.encode_slerp = nets.netG_encoder_gamma(self.input_data,self.class_nums, reuse=True)
        # Graph variable named 'code' that external code assigns
        # interpolated (slerp) codes to.
        # NOTE(review): the same variable name 'code' is created in
        # slerp_gan; building both graphs without reuse would conflict —
        # confirm only one of them is constructed per session.
        self.encode_slerp_z = tf.get_variable('code', [3, 562])
        # NOTE(review): this method calls lowercase ``nets`` while sibling
        # methods call ``Nets`` — verify both names are actually imported.
        self.image_syn_slerp_middle = nets.netG_deconder_gamma(
            self.encode_slerp_z, self.output_channel, reuse=True)
        # Refine the decoded image against the validation input batch.
        self.image_syn_slerp = nets.merge_net_16_unet(
            self.image_syn_slerp_middle, self.input_data, reuse=True)
def predict_drgan(self, batch_data, noise=None, pose=None):
    """Synthesize an image from *batch_data* plus a noise code.

    Same pipeline as ``predict_gan``: encode the input, concatenate the
    noise vector to the identity code, decode, and merge the decoded
    image with the input batch. (Removed: several blocks of commented-out
    dead code for an unused pose-conditioning path.)

    :param batch_data: input image batch fed to the encoder and merge net
    :param noise: noise tensor concatenated to the identity code
    :param pose: unused in this method; kept for signature compatibility
    :return: output tensor of the merge network
    """
    # Identity code from the recognition encoder (softmax/logits ignored).
    _, _, output_en = resnet_yd(batch_data)
    # Concatenate noise along the feature axis.
    sample_add_zp = tf.concat([output_en, noise], 1)
    # Decode, then refine the decoded image against the input.
    output_de_middle = Nets.netG_deconder_gamma(sample_add_zp,
                                                self.input_channel)
    output_de = Nets.merge_net_16_unet(output_de_middle, batch_data)
    return output_de
def _predict_drgan_multipie(self, reuse=False):
    """Build the Multi-PIE training graph (original comment: network
    training): recognition features, noise-conditioned synthesis,
    adversarial discriminator logits, and cosine similarities between
    profile/frontal features for both real and synthesized images.

    All result tensors are stored on ``self``; nothing is returned.

    :param reuse: True | False — whether to reuse variable scopes
        (netG_encoder_gamma / inference_recognition)
    :return: None
    """
    # Recognition network: softmax output, class logits, identity features.
    self.softresult, self.logits_cl, self.encode_fr = nets.inference_recognition(
        self.batch_data, classnums=self.class_nums, reuse=reuse)
    # Uniform noise in [-1, 1) appended to the identity code.
    noise = tf.random_uniform(shape=(self.batch_size, self.noise_z),
                              minval=-1, maxval=1, dtype=tf.float32,
                              name='input_noise')
    self.encode_add_z = tf.concat([self.encode_fr, noise], 1)
    # Decode code+noise into an intermediate synthesized image.
    self.output_syn_middle = nets.netG_deconder_gamma(self.encode_add_z,
                                                      self.output_channel,
                                                      reuse=reuse)
    # Split along the batch axis into two halves.
    # NOTE(review): assumes the batch is ordered [profile; frontal] —
    # confirm against the data pipeline.
    self.output_syn_middle_profile, self.output_syn_middle_front = tf.split(
        self.output_syn_middle, 2, axis=0)
    # Merge the full intermediate synthesis with the input batch.
    self.output_syn_front = nets.merge_net_16_unet(self.output_syn_middle,
                                                   self.batch_data,
                                                   reuse=reuse)
    # Identity embeddings of real and synthesized images (inference mode:
    # keep_prob=1, phase_train=False).
    self.identity_real, _ = inference(self.batch_data, keep_prob=1,
                                      phase_train=False)
    self.identity_syn, _ = inference(self.output_syn_front, keep_prob=1,
                                     phase_train=False, reuse=True)
    # Split synthesized output into the two batch halves.
    self.output_syn, self.output_gt_syn = tf.split(self.output_syn_front,
                                                   2, axis=0)
    # Discriminator logits: real on ground-truth inputs, fake on synthesis.
    self.real_logits = nets.Custom_netD_discriminator_adloss(
        self.gt_input_data, reuse=reuse)
    self.fake_logits = nets.Custom_netD_discriminator_adloss(
        self.output_syn, reuse=True)
    # Split the real identity features into profile/frontal halves.
    self.profile_content, self.front_content = tf.split(self.encode_fr, 2,
                                                        axis=0)
    # Features of the generated images (original comment, translated).
    self.syn_softmax, self.fake_logits_all, self.syn_encode \
        = resnet_yd(self.output_syn_front, reuse=True)
    self.syn_content, self.syn_front_content = tf.split(self.syn_encode, 2,
                                                        axis=0)
    # Compute cosine distance (original comment, translated):
    # dot(a, b) / (||a|| * ||b||) per batch row, real pairs first.
    self.cosine_real = tf.divide(
        tf.reduce_sum(tf.multiply(self.profile_content, self.front_content),
                      axis=1),
        tf.multiply(
            tf.sqrt(tf.reduce_sum(tf.square(self.front_content), axis=1)),
            tf.sqrt(tf.reduce_sum(tf.square(self.profile_content), axis=1))))
    # Same cosine similarity for the synthesized pairs.
    self.cosine_syn = tf.divide(
        tf.reduce_sum(tf.multiply(self.syn_content, self.syn_front_content),
                      axis=1),
        tf.multiply(
            tf.sqrt(tf.reduce_sum(tf.square(self.syn_content), axis=1)),
            tf.sqrt(
                tf.reduce_sum(tf.square(self.syn_front_content), axis=1))))