def transform_addition(self, img1, img2):
    """Fuse two images by merging their multi-scale encoder features.

    Both inputs are encoded into three feature maps each; corresponding
    maps are fused pairwise with Strategy(), the fused maps are stacked
    along the channel axis (3), and the result is decoded into an image.
    """
    # Encode each input into its three feature scales.
    a1, a2, a3 = self.encoder.encode(img1)
    b1, b2, b3 = self.encoder.encode(img2)
    # Fuse the corresponding scales pairwise.
    fused_maps = [Strategy(a, b) for a, b in ((a1, b1), (a2, b2), (a3, b3))]
    # One concat over all three maps equals the original pairwise concats
    # (concatenation along a fixed axis is associative).
    merged = tf.concat(fused_maps, 3)
    self.target_features = merged
    print('target_features:', merged.shape)
    # Decode the merged feature map back into an image.
    return self.decoder.decode(merged)
def transform_addition(self, img1, img2):
    """Encode both images, fuse the encodings with Strategy(), decode.

    The fused feature map is also stored on ``self.target_features``
    for later inspection.
    """
    fused = Strategy(self.encoder.encode(img1),
                     self.encoder.encode(img2))
    self.target_features = fused
    print('target_features:', fused.shape)
    return self.decoder.decode(fused)
def transform_addition(self, img1, img2):
    """Fuse two images in feature space and decode the result.

    Encodes each input, merges the two feature maps with Strategy(),
    caches the merged map on ``self.target_features``, and decodes it
    back into an image.
    """
    # Encode both inputs.
    features_a = self.encoder.encode(img1)
    features_b = self.encoder.encode(img2)
    # Fuse the feature maps and keep the result on the instance.
    self.target_features = Strategy(features_a, features_b)
    print('target_features:', self.target_features.shape)
    # Decode the fused features back into image space.
    return self.decoder.decode(self.target_features)
def transform_addition(self, img1, img2):
    """Fuse two images using the 4-output encoder variant.

    Only the main encodings are fused with Strategy(); the decoder also
    receives img1's two auxiliary block outputs (presumably skip
    connections — TODO confirm against the decoder implementation).
    img2's auxiliary outputs are unpacked but unused.
    """
    enc_a, _res_a, aux_a1, aux_a2 = self.encoder.encode(img1)
    enc_b, _res_b, _aux_b1, _aux_b2 = self.encoder.encode(img2)
    # Fuse only the primary encodings.
    fused = Strategy(enc_a, enc_b)
    self.target_features = fused
    print('target_features:', fused.shape)
    # Decoder consumes img1's auxiliary blocks alongside the fused map.
    return self.decoder.decode(fused, aux_a1, aux_a2)
def _handler_mix_a(ir_path, vis_path, model_path, model_pre_path, model_path_a, model_pre_path_a, ssim_weight, index, output_path=None):
    """Fuse one infrared/visible image pair through a DenseFuseNet checkpoint.

    Encodes both images, blends the shallow encoder blocks with fixed
    weights, fuses three residual-block outputs with Strategy(), decodes,
    and writes the fused image via save_images().

    NOTE(review): the attention-model arguments (model_path_a,
    model_pre_path_a) are currently unused — the whole attention branch
    below is commented out.

    :param ir_path: path to the infrared image (also reused for naming output)
    :param vis_path: path to the visible image
    :param model_path: checkpoint restored into the session
    :param model_pre_path: pre-trained weights passed to DenseFuseNet
    :param ssim_weight: only used in the output filename suffix
    :param index: only used in the output filename prefix
    :param output_path: directory handed to save_images()
    """
    # Load both images and add a leading batch dimension of 1.
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    # Swap H and W axes (assumes the network expects this layout — TODO confirm).
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    g2 = tf.Graph()  # graph loaded into session 2
    sess2 = tf.Session(graph=g2)  # session 2
    with sess2.as_default():
        with g2.as_default(), tf.Session() as sess:
            infrared_field = tf.placeholder(tf.float32, shape=ir_img.shape, name='content')
            visible_field = tf.placeholder(tf.float32, shape=vis_img.shape, name='style')
            dfn = DenseFuseNet(model_pre_path)
            # sess.run(tf.global_variables_initializer())
            # Each encoder call yields the main encoding plus three auxiliary block outputs.
            enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(infrared_field)
            enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(visible_field)
            # Placeholder the decoder reads the fused feature map from.
            result = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')
            saver = tf.train.Saver()
            saver.restore(sess, model_path)
            # ------------------------attention------------------------------------------------------
            #feature_a,feature_b=_get_attention(ir_path,vis_path,model_path_a,model_pre_path_a)
            #print("______+++________")
            #print(feature_a[0].shape)
            # ------------------------attention------------------------------------------------------
            # Run both encoders to obtain concrete (numpy) feature maps.
            enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
                [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
                feed_dict={infrared_field: ir_img})
            enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
                [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
                feed_dict={visible_field: vis_img})
            # ------------------------------------------------------------------------------------------------------------
            # ------------------------------------------------------------------------------------------------------------
            # Fixed-weight blends of the two auxiliary encoder blocks,
            # biased toward the visible image.
            block = 0.8 * enc_vis_block_temp + 0.2 * enc_ir_block_temp
            block2 = 0.4 * enc_ir_block2_temp + 0.6 * enc_vis_block2_temp
            #first_first = Strategy(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
            #first_first = L1_norm(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
            #first_second = Strategy(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
            #first_second = L1_norm(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
            #first_third = Strategy(enc_ir_res_block_temp[2], enc_vis_res_block_temp[2])
            #first_third = L1_norm_attention(enc_ir_res_block_temp[2],feature_a, enc_vis_res_block_temp[2],feature_b)
            #first_four = Strategy(enc_ir_res_block_temp[3], enc_vis_res_block_temp[3])
            #first_four = L1_norm_attention(enc_ir_res_block_temp[3],feature_a, enc_vis_res_block_temp[3],feature_b)
            #first_first = tf.concat([first_first, tf.to_int32(first_second, name='ToInt')], 3)
            #first_first = tf.concat([first_first, tf.to_int32(first_third, name='ToInt')], 3)
            #first_first = tf.concat([first_first, first_four], 3)
            #first = first_first
            # Fuse three selected residual-block outputs (indices 3, 6, 9).
            first = Strategy(enc_ir_res_block_temp[3], enc_vis_res_block_temp[3])
            second = Strategy(enc_ir_res_block_temp[6], enc_vis_res_block_temp[6])
            third = Strategy(enc_ir_res_block_temp[9], enc_vis_res_block_temp[9])
            # ------------------------------------------------------------------------------------------------------------
            # ------------------------------------------------------------------------------------------------------------
            # Equal-weight sum of the three fused maps.
            feature = 1 * first + 1 * second + 1 * third
            # ---------------------------------------------------------
            # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
            # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
            # ---------------------------------------------------------
            # NOTE(review): unlike _handler_mix, `feature` is fed below
            # without a .eval() call; if Strategy() returns a tf tensor
            # (not a numpy array) this feed_dict will fail — confirm
            # Strategy's return type.
            #feature = feature.eval()
            # --------------collapse the feature maps to a single channel----------------------------------
            #feature_map_vis_out = sess.run(tf.reduce_sum(feature_a[0], 3, keep_dims=True))
            #feature_map_ir_out = sess.run(tf.reduce_sum(feature_b[0],3, keep_dims=True))
            # ------------------------------------------------------------------
            # Build the decoder over the placeholder, then run it on the fused features.
            output_image = dfn.transform_decoder(result, block, block2)
            # output = dfn.transform_decoder(feature)
            # print(type(feature))
            # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
            output = sess.run(output_image, feed_dict={result: feature})
            save_images(ir_path, output, output_path, prefix='fused' + str(index), suffix='_mix_' + str(ssim_weight))
def _handler_mix(ir_path, vis_path, model_path, model_pre_path, ssim_weight, index, output_path=None):
    """Fuse one infrared/visible image pair with L1_norm/Strategy mixing.

    Like _handler_mix_a but single-session: auxiliary blocks are fused
    with L1_norm(), four early residual-block outputs are fused and
    concatenated into `first`, and the weighted feature sum is decoded
    and saved via save_images().

    :param ir_path: path to the infrared image (also reused for naming output)
    :param vis_path: path to the visible image
    :param model_path: checkpoint restored into the session
    :param model_pre_path: pre-trained weights passed to DenseFuseNet
    :param ssim_weight: only used in the output filename suffix
    :param index: only used in the output filename prefix
    :param output_path: directory handed to save_images()
    """
    mix_block = []  # NOTE(review): never used in this function
    # Load both images and add a leading batch dimension of 1.
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    # Swap H and W axes (assumes the network expects this layout — TODO confirm).
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))
    print('img shape final:', ir_img.shape)
    with tf.Graph().as_default(), tf.Session() as sess:
        infrared_field = tf.placeholder(tf.float32, shape=ir_img.shape, name='content')
        visible_field = tf.placeholder(tf.float32, shape=vis_img.shape, name='style')
        # -----------------------------------------------
        dfn = DenseFuseNet(model_pre_path)
        #sess.run(tf.global_variables_initializer())
        # Each encoder call yields the main encoding plus three auxiliary block outputs.
        enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(infrared_field)
        enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(visible_field)
        # Placeholder the decoder reads the fused feature map from.
        result = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
        # Run both encoders to obtain concrete (numpy) feature maps.
        enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
            [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
            feed_dict={infrared_field: ir_img})
        enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
            [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
            feed_dict={visible_field: vis_img})
        # Fuse the auxiliary blocks with L1-norm weighting.
        block = L1_norm(enc_ir_block_temp, enc_vis_block_temp)
        block2 = L1_norm(enc_ir_block2_temp, enc_vis_block2_temp)
        # Fuse the first four residual-block outputs, alternating strategies.
        first_first = L1_norm(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
        first_second = Strategy(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
        #first_third = L1_norm_attention(enc_ir_res_block_temp[2],feation_ir, enc_vis_res_block_temp[2],feation_vis)
        #first_four = L1_norm_attention(enc_ir_res_block_temp[3],feation_ir, enc_vis_res_block_temp[3],feation_vis)
        first_third = L1_norm(enc_ir_res_block_temp[2], enc_vis_res_block_temp[2])
        first_four = Strategy(enc_ir_res_block_temp[3], enc_vis_res_block_temp[3])
        # NOTE(review): casting fused feature maps to int32 before concat
        # is suspicious — tf.concat requires matching dtypes across all
        # inputs, and truncating features to integers loses information.
        # Confirm the dtypes Strategy/L1_norm return before trusting this.
        first_first = tf.concat([first_first, tf.to_int32(first_second, name='ToInt')], 3)
        first_first = tf.concat([first_first, tf.to_int32(first_third, name='ToInt')], 3)
        first_first = tf.concat([first_first, first_four], 3)
        first = first_first
        second = L1_norm(enc_ir_res_block_temp[6], enc_vis_res_block_temp[6])
        third = L1_norm(enc_ir_res_block_temp[9], enc_vis_res_block_temp[9])
        # Weighted sum; deeper fused maps contribute only 10% each.
        # NOTE(review): `first` is a channel-wise concat, so its channel
        # count differs from second/third and from the `result`
        # placeholder's shape — verify this addition and feed actually
        # broadcast/match as intended.
        feature = 1 * first + 0.1 * second + 0.1 * third
        #---------------------------------------------------------
        # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
        # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
        #---------------------------------------------------------
        # Materialize the fused tensor into a numpy array so it can be fed below.
        feature = feature.eval()
        # Build the decoder over the placeholder, then run it on the fused features.
        output_image = dfn.transform_decoder(result, block, block2)
        # output = dfn.transform_decoder(feature)
        # print(type(feature))
        # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
        output = sess.run(output_image, feed_dict={result: feature})
        save_images(ir_path, output, output_path, prefix='fused' + str(index), suffix='_mix_' + str(ssim_weight))