def predict_gan(self, batch_data, noise=None, pose=None):
    """Synthesize an image batch: encode identity, append noise, decode, refine.

    :param batch_data: input image batch fed to the identity encoder.
    :param noise: noise code concatenated to the identity features.
    :param pose: accepted but unused in this path (kept for interface parity).
    :return: the refined synthesized image batch.
    """
    # Only the third encoder output (the identity/feature code) is used.
    _, _, identity_code = resnet_yd(batch_data)
    decoder_input = tf.concat([identity_code, noise], 1)
    coarse_syn = Nets.netG_deconder_gamma(decoder_input, self.input_channel)
    # U-net merge refines the coarse synthesis against the original input.
    return Nets.merge_net_16_unet(coarse_syn, batch_data)
def inference_recognition(inputs, classnum, reuse=True):
    """Identity-recognition head: resnet features plus a softmax classifier.

    :param inputs: image batch.
    :param classnum: number of identity classes.
    :param reuse: variable-reuse flag for the fully-connected scope only.
    :return: (softmax probabilities, raw class logits, resnet features).
    """
    # NOTE(review): resnet_yd is called with a hardcoded reuse=True regardless
    # of the `reuse` argument -- presumably its variables are created by an
    # earlier call elsewhere; confirm before changing.
    _, _, features = resnet_yd(inputs, reuse=True)
    # Scope name 'recognation_fc' is a typo in the checkpoint namespace; it
    # must stay as-is or saved weights will not load.
    with tf.variable_scope('recognation_fc', reuse=reuse):
        class_logits = slim.fully_connected(
            features,
            classnum,
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
            scope='recognition_soft')
    return tf.nn.softmax(class_logits), class_logits, features
def slerp_gan(self, bath_data):
    '''
    Build the test graph for identity encoding and slerp-code synthesis.

    :param bath_data: image batch (parameter name kept as-is for callers).
    :return: None; all tensors are stored as attributes on self.
    '''
    with tf.name_scope('test'):
        # Identity features of the input batch (third encoder output only).
        _, _, self.encode_slerp = resnet_yd(bath_data, reuse=True)
        # Free variable holding 3 latent codes of size 562 to be filled with
        # interpolated (slerp) values at evaluation time.
        self.encode_slerp_z = tf.get_variable('code', [3, 562])
        # Decode the latent codes into 3-channel images, then refine via the
        # U-net merge against the input batch. reuse=True: the generator
        # variables were created by the training graph.
        self.image_syn_slerp_middle = Nets.netG_deconder_gamma(
            self.encode_slerp_z, 3, reuse=True)
        self.image_syn_slerp = Nets.merge_net_16_unet(
            self.image_syn_slerp_middle, bath_data, reuse=True)
def _init_validation_model(self):
    '''
    Build the validation graph for identity encoding and slerp-code synthesis.

    :return: None; all tensors are stored as attributes on self.
    '''
    with tf.name_scope('test'):
        # Identity features of the validation inputs (third output only).
        _, _, self.encode_slerp = resnet_yd(self.input_data, reuse=True)
        # _,_,self.encode_slerp = nets.netG_encoder_gamma(self.input_data,self.class_nums, reuse=True)
        # Free variable holding 3 latent codes of size 562 to be filled with
        # interpolated (slerp) values at evaluation time.
        self.encode_slerp_z = tf.get_variable('code', [3, 562])
        # Decode and refine; reuse=True because the generator variables were
        # created by the training graph.
        self.image_syn_slerp_middle = nets.netG_deconder_gamma(
            self.encode_slerp_z, self.output_channel, reuse=True)
        self.image_syn_slerp = nets.merge_net_16_unet(
            self.image_syn_slerp_middle, self.input_data, reuse=True)
def predict_drgan(self, batch_data, noise=None, pose=None):
    """Build the DR-GAN prediction path: encode, concat noise, decode, merge.

    :param batch_data: input image batch.
    :param noise: noise vector concatenated to the identity code.
    :param pose: accepted but unused in this path (kept for interface parity).
    :return: synthesized image batch.
    """
    # Keep only the identity/feature code from the encoder.
    _, _, identity_code = resnet_yd(batch_data)
    latent = tf.concat([identity_code, noise], 1)
    coarse = Nets.netG_deconder_gamma(latent, self.input_channel)
    # Refine the coarse output with the U-net merge network.
    return Nets.merge_net_16_unet(coarse, batch_data)
def _init_validation_model(self):
    '''
    Build the validation graph for identity encoding and slerp-code synthesis.

    :return: None; all tensors are stored as attributes on self.
    '''
    with tf.name_scope('test'):
        # Identity features of the validation batch (third output only).
        _, _, output_en_test = resnet_yd(self.batch_data, reuse=True)
        # Reshape the 512-d feature vector into a 1x1 spatial map so it can
        # feed the deconvolutional generator.
        self.encode_slerp = tf.reshape(output_en_test, [-1, 1, 1, 512],
                                       name='slerp_reshape_encoder')
        # Free variable for interpolated codes: 512 identity dims plus the
        # one-hot pose and illumination condition dims.
        self.slerp_code = tf.get_variable(
            'slerpcode',
            shape=(self.test_slerp_count, 1, 1,
                   512 + self.pose_c + self.light_c),
            dtype=tf.float32)
        # reuse=True: generator variables were created by the training graph.
        self.image_syn_slerp = nets.netG_deconder_gamma(
            self.slerp_code, self.output_channel, reuse=True)
def _predict_drgan_multipie(self, reuse=False):
    '''
    Build the training graph: recognition encoder, noise-conditioned
    generator, U-net refiner, perceptual identity network and adversarial
    discriminator, plus real/synthetic cosine-similarity diagnostics.

    :param reuse: True | False -- variable reuse for the sub-networks first
        created here (second calls to the same network pass reuse=True).
    :return: None; all tensors are stored as attributes on self.
    '''
    # Recognition branch: softmax probabilities, class logits and identity
    # features of the (profile, front) input batch.
    self.softresult, self.logits_cl, self.encode_fr = nets.inference_recognition(
        self.batch_data, classnums=self.class_nums, reuse=reuse)
    # Uniform noise appended to the identity code before decoding.
    noise = tf.random_uniform(shape=(self.batch_size, self.noise_z),
                              minval=-1,
                              maxval=1,
                              dtype=tf.float32,
                              name='input_noise')
    self.encode_add_z = tf.concat([self.encode_fr, noise], 1)
    self.output_syn_middle = nets.netG_deconder_gamma(self.encode_add_z,
                                                      self.output_channel,
                                                      reuse=reuse)
    # NOTE(review): these two split halves appear unused below -- presumably
    # kept for losses defined elsewhere; confirm before removing.
    self.output_syn_middle_profile, self.output_syn_middle_front = tf.split(
        self.output_syn_middle, 2, axis=0)
    # Refine the coarse synthesis against the input batch.
    self.output_syn_front = nets.merge_net_16_unet(self.output_syn_middle,
                                                   self.batch_data,
                                                   reuse=reuse)
    # Perceptual identity features of real and synthesized images
    # (inference net frozen: keep_prob=1, phase_train=False).
    self.identity_real, _ = inference(self.batch_data,
                                      keep_prob=1,
                                      phase_train=False)
    self.identity_syn, _ = inference(self.output_syn_front,
                                     keep_prob=1,
                                     phase_train=False,
                                     reuse=True)
    # First half: synthesized from profiles; second half: from fronts.
    self.output_syn, self.output_gt_syn = tf.split(self.output_syn_front,
                                                   2,
                                                   axis=0)
    # Adversarial discriminator on real ground truth vs. synthesized images.
    self.real_logits = nets.Custom_netD_discriminator_adloss(
        self.gt_input_data, reuse=reuse)
    self.fake_logits = nets.Custom_netD_discriminator_adloss(
        self.output_syn, reuse=True)
    self.profile_content, self.front_content = tf.split(self.encode_fr,
                                                        2,
                                                        axis=0)
    # Identity features of the generated images.
    self.syn_softmax, self.fake_logits_all, self.syn_encode \
        = resnet_yd(self.output_syn_front, reuse=True)
    self.syn_content, self.syn_front_content = tf.split(self.syn_encode,
                                                        2,
                                                        axis=0)
    # Cosine distance between profile and front features (real pairs).
    self.cosine_real = tf.divide(
        tf.reduce_sum(tf.multiply(self.profile_content, self.front_content),
                      axis=1),
        tf.multiply(
            tf.sqrt(tf.reduce_sum(tf.square(self.front_content), axis=1)),
            tf.sqrt(tf.reduce_sum(tf.square(self.profile_content), axis=1))))
    # Cosine distance between the two synthesized halves.
    self.cosine_syn = tf.divide(
        tf.reduce_sum(tf.multiply(self.syn_content, self.syn_front_content),
                      axis=1),
        tf.multiply(
            tf.sqrt(tf.reduce_sum(tf.square(self.syn_content), axis=1)),
            tf.sqrt(
                tf.reduce_sum(tf.square(self.syn_front_content), axis=1))))
def _predict_drgan_multipie(self, reuse=False):
    '''
    Build the pose/illumination-conditioned training graph: recognition
    encoder, two conditioned generator passes (original and reversed
    conditions), and adversarial + pose/illumination discriminators.

    :param reuse: True | False -- variable reuse for the sub-networks first
        created here (second calls to the same network pass reuse=True).
    :return: None; all tensors are stored as attributes on self.
    '''
    # Recognition branch: class logits and identity content features.
    _, self.logits, self.pidrcontent = nets.inference_recognition(
        self.batch_data, self.class_nums, reuse=reuse)
    # Reshape the 512-d feature vector into a 1x1 spatial map for the
    # deconvolutional generator.
    self.output_en = tf.reshape(self.pidrcontent, [-1, 1, 1, 512])
    # One-hot pose + illumination conditions, reshaped and cast to match the
    # encoder feature map, then concatenated along channels.
    pose_add_noise_onhot = tf.concat([self.pose, self.light], axis=1)
    pose_add_noise = tf.reshape(pose_add_noise_onhot,
                                [-1, 1, 1, self.pose_c + self.light_c])
    pose_add_noise_float = tf.cast(pose_add_noise, dtype=tf.float32)
    sample_add_pn = tf.concat([self.output_en, pose_add_noise_float], 3)
    # syn1: synthesis under the original pose/illumination conditions.
    self.output_syn1 = nets.netG_deconder_gamma(sample_add_pn,
                                                self.output_channel,
                                                reuse=reuse)
    # Reversed conditions: concat pose and light (kept separate one-hots).
    pose_add_noise_onhot_reverse = tf.concat(
        [self.pose_reverse, self.light_reverse], axis=1)
    pose_add_noise_reverse = tf.reshape(
        pose_add_noise_onhot_reverse,
        [-1, 1, 1, self.pose_c + self.light_c])
    pose_add_noise_reverse_float = tf.cast(pose_add_noise_reverse,
                                           dtype=tf.float32)
    sample_add_pn_ex = tf.concat(
        [self.output_en, pose_add_noise_reverse_float], 3)
    # syn2: synthesis under the reversed pose/illumination conditions.
    self.output_syn2 = nets.netG_deconder_gamma(sample_add_pn_ex,
                                                self.output_channel,
                                                reuse=True)
    # Adversarial and pose/illumination discriminators on real images
    # (variables created here when reuse=False).
    self.reallogits = nets.Custom_netD_discriminator_adloss(
        self.batch_data, reuse=reuse)
    self.real_poselogits, self.real_lightlogits = nets.Custom_netD_discriminator_psloss(
        self.batch_data,
        posenum=self.pose_c,
        reuse=reuse,
        illuminationnum=self.light_c)
    # Identity branch on syn1. NOTE(review): [:, :, :, ::-1] reverses the
    # channel axis -- presumably a BGR/RGB conversion for resnet_yd; confirm.
    self.pidf1_softmax, self.pidf1logits, self.pidf1content \
        = \
        resnet_yd(self.output_syn1[:, :, :, ::-1], reuse=True)
    self.fake1logits = nets.Custom_netD_discriminator_adloss(
        self.output_syn1, reuse=True)
    self.fake1_poselogits, self.fake1_lightlogits = nets.Custom_netD_discriminator_psloss(
        self.output_syn1,
        posenum=self.pose_c,
        reuse=True,
        illuminationnum=self.light_c)
    # Same discriminator/identity evaluations for syn2.
    self.pidf2_softmax, self.pidf2logits, self.pidf2content \
        = \
        resnet_yd(self.output_syn2[:, :, :, ::-1], reuse=True)
    self.fake2logits = nets.Custom_netD_discriminator_adloss(
        self.output_syn2, reuse=True)
    self.fake2_poselogits, self.fake2_lightlogits = nets.Custom_netD_discriminator_psloss(
        self.output_syn2,
        posenum=self.pose_c,
        reuse=True,
        illuminationnum=self.light_c)
    # NOTE(review): resnet_yd returns a 3-tuple, so encode_syn_1/2 each bind
    # the WHOLE tuple here, not just the encoding (elsewhere the pattern is
    # `_, _, code = resnet_yd(...)`). Verify downstream consumers expect
    # tuples before relying on these attributes.
    self.encode_syn_1, self.encode_syn_2 = resnet_yd(
        self.output_syn1[:, :, :, ::-1],
        reuse=True), resnet_yd(self.output_syn2[:, :, :, ::-1], reuse=True)