Example No. 1
def _handler_l1(ir_path,
                vis_path,
                model_path,
                model_pre_path,
                ssim_weight,
                index,
                output_path=None):
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape

    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])

    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    print('img shape final:', ir_img.shape)

    with tf.Graph().as_default(), tf.Session() as sess:

        # build the dataflow graph
        infrared_field = tf.placeholder(tf.float32,
                                        shape=ir_img.shape,
                                        name='content')
        visible_field = tf.placeholder(tf.float32,
                                       shape=vis_img.shape,
                                       name='style')

        dfn = DenseFuseNet(model_pre_path)

        enc_ir = dfn.transform_encoder(infrared_field)
        enc_vis = dfn.transform_encoder(visible_field)

        target = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')

        output_image = dfn.transform_decoder(target)

        # restore the trained model and run the fusion
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis],
                                             feed_dict={
                                                 infrared_field: ir_img,
                                                 visible_field: vis_img
                                             })
        feature = L1_norm(enc_ir_temp, enc_vis_temp)

        output = sess.run(output_image, feed_dict={target: feature})
        save_images(ir_path,
                    output,
                    output_path,
                    prefix='fused' + str(index),
                    suffix='_densefuse_l1norm_' + str(ssim_weight))
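
For context, L1_norm above is the repo's l1-norm fusion strategy applied to the two encodings. A minimal NumPy sketch of what such a strategy typically computes follows; the function name, signature, and soft-weighting details are assumptions, since the real helper is defined elsewhere in the repository.

import numpy as np

def l1_norm_fusion(enc_a, enc_b, eps=1e-8):
    # Hypothetical sketch, not the repo's L1_norm.
    # Activity maps: l1-norm across the channel axis, shape [1, H, W, 1].
    act_a = np.sum(np.abs(enc_a), axis=-1, keepdims=True)
    act_b = np.sum(np.abs(enc_b), axis=-1, keepdims=True)
    # Soft weights proportional to each encoding's activity.
    w_a = act_a / (act_a + act_b + eps)
    w_b = act_b / (act_a + act_b + eps)
    # Weighted blend of the two feature maps.
    return w_a * enc_a + w_b * enc_b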
Example No. 2
def _handler_l1(content_name, style_name, model_path, model_pre_path, ssim_weight, index, output_path=None):
    infrared_path = content_name
    visible_path = style_name

    content_img = get_train_images(infrared_path, flag=False)
    style_img   = get_train_images(visible_path, flag=False)
    dimension = content_img.shape

    content_img = content_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    style_img   = style_img.reshape([1, dimension[0], dimension[1], dimension[2]])

    content_img = np.transpose(content_img, (0, 2, 1, 3))
    style_img = np.transpose(style_img, (0, 2, 1, 3))
    print('content_img shape final:', content_img.shape)

    with tf.Graph().as_default(), tf.Session() as sess:

        # build the dataflow graph
        content = tf.placeholder(
            tf.float32, shape=content_img.shape, name='content')
        style = tf.placeholder(
            tf.float32, shape=style_img.shape, name='style')

        dfn = DenseFuseNet(model_pre_path)

        enc_c = dfn.transform_encoder(content)
        enc_s = dfn.transform_encoder(style)

        target = tf.placeholder(
            tf.float32, shape=enc_c.shape, name='target')

        output_image = dfn.transform_decoder(target)

        # restore the trained model and run the fusion
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_c_temp, enc_s_temp = sess.run([enc_c, enc_s], feed_dict={content: content_img, style: style_img})
        feature = L1_norm(enc_c_temp, enc_s_temp)

        output = sess.run(output_image, feed_dict={target: feature})
        save_images(infrared_path, output, output_path,
                    prefix='fused' + str(index), suffix='_densefuse_l1norm_'+str(ssim_weight))

    return output
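
An illustrative call to this handler (all paths, the index, and the weight below are placeholders, not values from the repository):

fused = _handler_l1('images/IR1.png', 'images/VIS1.png',
                    model_path='./models/densefuse.ckpt',  # placeholder path
                    model_pre_path=None,
                    ssim_weight=1000,
                    index=1,
                    output_path='./outputs/')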
Example No. 3
def train_recons_a(original_imgs_path, validation_imgs_path, save_path_a, model_pre_path_a, ssim_weight_a, EPOCHES_set, BATCH_SIZE, MODEL_SAVE_PATHS, debug=False, logging_period=1):
    if debug:
        from datetime import datetime
        start_time = datetime.now()
    EPOCHS = EPOCHES_set
    print("EPOCHES   : ", EPOCHS)  # EPOCHS = 4           遍历整个数据集的次数,训练网络一共要执行n*4次
    print("BATCH_SIZE: ", BATCH_SIZE)  # BATCH_SIZE = 2       每个Batch有2个样本,共n/2个Batch,每处理两个样本模型权重就更新

    num_val = len(validation_imgs_path)  # number of validation samples
    num_imgs = len(original_imgs_path)  # number of training samples
    # num_imgs = 100
    original_imgs_path = original_imgs_path[:num_imgs]  # no-op slice of the full list (kept from the original code)
    mod = num_imgs % BATCH_SIZE  # leftover samples that do not fill a complete batch

    print('Train images number %d.\n' % num_imgs)
    print('Train batches per epoch %s.\n' % str(num_imgs / BATCH_SIZE))

    if mod > 0:
        print('Train set has been trimmed %d samples...\n' % mod)
        original_imgs_path = original_imgs_path[:-mod]  # drop the leftovers so the count divides evenly by BATCH_SIZE

    # get the training image shape
    # height, width and channel count of the training images
    HEIGHT, WIDTH, CHANNELS = TRAINING_IMAGE_SHAPE
    INPUT_SHAPE = (BATCH_SIZE, HEIGHT, WIDTH, CHANNELS)  # batched input shape

    HEIGHT_OR, WIDTH_OR, CHANNELS_OR = TRAINING_IMAGE_SHAPE_OR
    INPUT_SHAPE_OR = (BATCH_SIZE, HEIGHT_OR, WIDTH_OR, CHANNELS_OR)  # _OR presumably denotes the original image size

    # create the graph
    with tf.Graph().as_default(), tf.Session() as sess:
        original = tf.placeholder(tf.float32, shape=INPUT_SHAPE_OR, name='original')
        #attention_map = tf.placeholder(tf.float32, shape=INPUT_SHAPE_OR, name='attention')
        # tf.placeholder reserves a node in the graph and allocates only the
        # necessary memory; data is supplied at run time through feed_dict.
        # Arguments: dtype (e.g. tf.float32 or tf.float64), shape (batch,
        # height, width, channels) and an optional name. Returns a Tensor.
        source = original  # alias: source and original refer to the same tensor

        print('source  :', source.shape)
        print('original:', original.shape)

        # create the DenseFuse net (encoder and decoder)
        model_pre_path = MODEL_SAVE_PATHS
        dfn = DenseFuseNet(model_pre_path)  # model_pre_path defaults to None; when set, training starts from that checkpoint's parameters

        atn = attention.Attention(None)
        enc, enc_res_block, enc_block, enc_block2 = dfn.transform_encoder(source)
        # weight_map=atn.get_attention(attention_map)
        # enc_res_block_6_a= tf.multiply(enc_res_block[6],weight_map)
        # enc_res_block_9_a=tf.multiply(enc_res_block[9],weight_map)
        # feature = enc_res_block[0]
        # mix_indices = (1, 2, 3)
        # for i in mix_indices:
        #     feature = tf.concat([feature, enc_res_block[i]], 3)
        # t_decode=feature+0.1*enc_res_block_6_a+0.1*enc_res_block_9_a
        new_block2, _ = atn.get_attention(enc_block2)

        generated_img = dfn.transform_decoder(enc, enc_block, new_block2)
        print('generate:', generated_img.shape)
        ssim_loss_value = SSIM_LOSS(original, generated_img)  # SSIM between input and reconstruction
        pixel_loss = tf.reduce_sum(tf.square(original - generated_img))
        pixel_loss = pixel_loss / (BATCH_SIZE * HEIGHT * WIDTH)  # per-pixel squared error
        ssim_loss = 1 - ssim_loss_value  # SSIM loss

        loss = ssim_weight_a * ssim_loss + pixel_loss  # total loss
        # train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
        train_op = tf.train.AdamOptimizer(LEARNING_RATE_2).minimize(loss, var_list=atn.weights)  # Adam; only the attention weights are updated
        sess.run(tf.global_variables_initializer())

        # saver = tf.train.Saver()
        saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)

        # ** Start Training **
        step = 0
        count_loss = 0
        n_batches = int(len(original_imgs_path) // BATCH_SIZE)
        val_batches = int(len(validation_imgs_path) // BATCH_SIZE)

        if debug:
            elapsed_time = datetime.now() - start_time
            print('\nElapsed time for preprocessing before actually train the model: %s' % elapsed_time)
            print('Now begin to train the model...\n')
            start_time = datetime.now()

        Loss_all = [0] * (EPOCHS * n_batches)
        Loss_ssim = [0] * (EPOCHS * n_batches)
        Loss_pixel = [0] * (EPOCHS * n_batches)
        Val_ssim_data = [0] * (EPOCHS * n_batches)
        Val_pixel_data = [0] * (EPOCHS * n_batches)
        for epoch in range(EPOCHS):

            np.random.shuffle(original_imgs_path)

            for batch in range(n_batches):
                # retrieve a batch of training images

                original_path = original_imgs_path[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE)]
                original_batch = get_train_images(original_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                original_batch = original_batch.reshape([BATCH_SIZE, 256, 256, 1])

                # print('original_batch shape final:', original_batch.shape)
                # -----------------------------------------------
                # imag = sess.run(original, feed_dict={original: original_batch})
                # guideFilter_imgs = np.zeros(INPUT_SHAPE_OR)
                # for i in range(BATCH_SIZE):
                #     input = np.squeeze(imag[i])
                #     out = atn.Grad(input)
                #     out = np.expand_dims(out, axis=-1)
                #     out[out < 0] = 0
                #     guideFilter_imgs[i] = out
                # ----------------------------------------------
                # run the training step
                sess.run(train_op, feed_dict={original: original_batch})
                step += 1
                if debug:
                    is_last_step = (epoch == EPOCHS - 1) and (batch == n_batches - 1)

                    if is_last_step or step % logging_period == 0:
                        elapsed_time = datetime.now() - start_time
                        _ssim_loss, _loss, _p_loss = sess.run([ssim_loss, loss, pixel_loss],
                                                              feed_dict={original: original_batch})
                        Loss_all[count_loss] = _loss
                        Loss_ssim[count_loss] = _ssim_loss
                        Loss_pixel[count_loss] = _p_loss
                        print('epoch: %d/%d, step: %d,  total loss: %s, elapsed time: %s' % (
                            epoch, EPOCHS, step, _loss, elapsed_time))
                        print('p_loss: %s, ssim_loss: %s, w_ssim_loss: %s' % (
                            _p_loss, _ssim_loss, ssim_weight_a * _ssim_loss))

                        # run validation over the whole validation set at every logging step
                        val_ssim_acc = 0
                        val_pixel_acc = 0
                        np.random.shuffle(validation_imgs_path)
                        val_start_time = datetime.now()
                        for v in range(val_batches):
                            val_original_path = validation_imgs_path[v * BATCH_SIZE:(v * BATCH_SIZE + BATCH_SIZE)]
                            val_original_batch = get_train_images(val_original_path, crop_height=HEIGHT,
                                                                  crop_width=WIDTH, flag=False)
                            val_original_batch = val_original_batch.reshape([BATCH_SIZE, 256, 256, 1])
                            val_ssim, val_pixel = sess.run([ssim_loss, pixel_loss],
                                                           feed_dict={original: val_original_batch})
                            val_ssim_acc = val_ssim_acc + (1 - val_ssim)
                            val_pixel_acc = val_pixel_acc + val_pixel
                        Val_ssim_data[count_loss] = val_ssim_acc / val_batches
                        Val_pixel_data[count_loss] = val_pixel_acc / val_batches
                        val_es_time = datetime.now() - val_start_time
                        print('validation value, SSIM: %s, Pixel: %s, elapsed time: %s' % (
                            val_ssim_acc / val_batches, val_pixel_acc / val_batches, val_es_time))
                        print('------------------------------------------------------------------------------')
                        count_loss += 1

        # ** Done Training & Save the model **
        saver.save(sess, save_path_a)
        # ----------------------------------------------------------------------------------------------------------------
        loss_data = Loss_all[:count_loss]
        scio.savemat('/data/ljy/1-Project-Go/01-11-upsampling+attention/models_a/loss/DeepDenseLossData' + str(ssim_weight_a) + '.mat',
                     {'loss': loss_data})

        loss_ssim_data = Loss_ssim[:count_loss]
        scio.savemat('/data/ljy/1-Project-Go/01-11-upsampling+attention/models_a/loss/DeepDenseLossSSIMData' + str(
            ssim_weight_a) + '.mat', {'loss_ssim': loss_ssim_data})

        loss_pixel_data = Loss_pixel[:count_loss]
        scio.savemat('/data/ljy/1-Project-Go/01-11-upsampling+attention/models_a/loss/DeepDenseLossPixelData' + str(
            ssim_weight_a) + '.mat', {'loss_pixel': loss_pixel_data})

        validation_ssim_data = Val_ssim_data[:count_loss]
        scio.savemat('/data/ljy/1-Project-Go/01-11-upsampling+attention/models_a/val/Validation_ssim_Data' + str(
            ssim_weight_a) + '.mat', {'val_ssim': validation_ssim_data})

        validation_pixel_data = Val_pixel_data[:count_loss]
        scio.savemat('/data/ljy/1-Project-Go/01-11-upsampling+attention/models_a/val/Validation_pixel_Data' + str(
            ssim_weight_a) + '.mat', {'val_pixel': validation_pixel_data})
        # ----------------------------------------------------------------------------------------------------
        if debug:
            elapsed_time = datetime.now() - start_time
            print('Done training! Elapsed time: %s' % elapsed_time)
            print('Model is saved to: %s' % save_path_a)
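
SSIM_LOSS is defined elsewhere in the repo; a minimal sketch of a mean-SSIM score in TF1 via tf.image.ssim follows. max_val=255.0 is an assumption about the pixel range, and the repo's implementation may use its own window size and constants.

def ssim_score(x, y):
    # Mean SSIM over the batch; higher is better, which is why the
    # training loop above uses 1 - SSIM as its loss term.
    return tf.reduce_mean(tf.image.ssim(x, y, max_val=255.0))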
Example No. 4
def _handler_rgb_l1(ir_path, vis_path, model_path, model_pre_path, ssim_weight, index, output_path=None):
	# ir_img = get_train_images(ir_path, flag=False)
	# vis_img = get_train_images(vis_path, flag=False)
	ir_img = get_train_images_rgb(ir_path, flag=False)
	vis_img = get_train_images_rgb(vis_path, flag=False)
	dimension = ir_img.shape

	ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
	vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])

	ir_img = np.transpose(ir_img, (0, 2, 1, 3))
	vis_img = np.transpose(vis_img, (0, 2, 1, 3))

	ir_img1 = ir_img[:, :, :, 0]
	ir_img1 = ir_img1.reshape([1, dimension[0], dimension[1], 1])
	ir_img2 = ir_img[:, :, :, 1]
	ir_img2 = ir_img2.reshape([1, dimension[0], dimension[1], 1])
	ir_img3 = ir_img[:, :, :, 2]
	ir_img3 = ir_img3.reshape([1, dimension[0], dimension[1], 1])

	vis_img1 = vis_img[:, :, :, 0]
	vis_img1 = vis_img1.reshape([1, dimension[0], dimension[1], 1])
	vis_img2 = vis_img[:, :, :, 1]
	vis_img2 = vis_img2.reshape([1, dimension[0], dimension[1], 1])
	vis_img3 = vis_img[:, :, :, 2]
	vis_img3 = vis_img3.reshape([1, dimension[0], dimension[1], 1])

	print('img shape final:', ir_img1.shape)

	with tf.Graph().as_default(), tf.Session() as sess:
		infrared_field = tf.placeholder(
			tf.float32, shape=ir_img1.shape, name='content')
		visible_field = tf.placeholder(
			tf.float32, shape=ir_img1.shape, name='style')

		dfn = DenseFuseNet(model_pre_path)

		enc_ir = dfn.transform_encoder(infrared_field)
		enc_vis = dfn.transform_encoder(visible_field)

		target = tf.placeholder(
			tf.float32, shape=enc_ir.shape, name='target')

		output_image = dfn.transform_decoder(target)

		# restore the trained model and run the fusion
		saver = tf.train.Saver()
		saver.restore(sess, model_path)

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img1, visible_field: vis_img1})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output1 = sess.run(output_image, feed_dict={target: feature})

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img2, visible_field: vis_img2})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output2 = sess.run(output_image, feed_dict={target: feature})

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img3, visible_field: vis_img3})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output3 = sess.run(output_image, feed_dict={target: feature})

		output1 = output1.reshape([1, dimension[0], dimension[1]])
		output2 = output2.reshape([1, dimension[0], dimension[1]])
		output3 = output3.reshape([1, dimension[0], dimension[1]])

		output = np.stack((output1, output2, output3), axis=-1)
		output = np.transpose(output, (0, 2, 1, 3))
		save_images(ir_path, output, output_path,
		            prefix='fused' + str(index), suffix='_densefuse_l1norm_'+str(ssim_weight))
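
The three per-channel passes above repeat one pattern; a compact equivalent, reusing the session, placeholders, and helpers already built in this handler, is the following sketch:

# Sketch: per-channel fusion loop, relying on the graph defined above.
outputs = []
for c in range(3):
    ir_c = ir_img[:, :, :, c].reshape([1, dimension[0], dimension[1], 1])
    vis_c = vis_img[:, :, :, c].reshape([1, dimension[0], dimension[1], 1])
    enc_ir_c, enc_vis_c = sess.run([enc_ir, enc_vis],
                                   feed_dict={infrared_field: ir_c, visible_field: vis_c})
    fused_c = L1_norm(enc_ir_c, enc_vis_c)
    out_c = sess.run(output_image, feed_dict={target: fused_c})
    outputs.append(out_c.reshape([1, dimension[0], dimension[1]]))
output = np.transpose(np.stack(outputs, axis=-1), (0, 2, 1, 3))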
Example No. 5
def _handler_mix_a(ir_path,
                   vis_path,
                   model_path,
                   model_pre_path,
                   model_path_a,
                   model_pre_path_a,
                   ssim_weight,
                   index,
                   output_path=None):
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    g2 = tf.Graph()  # graph for the second session

    sess2 = tf.Session(graph=g2)  # the second session

    with sess2.as_default():
        with g2.as_default(), tf.Session() as sess:
            infrared_field = tf.placeholder(tf.float32,
                                            shape=ir_img.shape,
                                            name='content')
            visible_field = tf.placeholder(tf.float32,
                                           shape=vis_img.shape,
                                           name='style')

            dfn = DenseFuseNet(model_pre_path)

            # sess.run(tf.global_variables_initializer())

            enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(
                infrared_field)
            enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(
                visible_field)

            result = tf.placeholder(tf.float32,
                                    shape=enc_ir.shape,
                                    name='target')

            saver = tf.train.Saver()
            saver.restore(sess, model_path)

            # ------------------------attention------------------------------------------------------
            #feature_a,feature_b=_get_attention(ir_path,vis_path,model_path_a,model_pre_path_a)
            #print("______+++________")
            #print(feature_a[0].shape)
            # ------------------------attention------------------------------------------------------

            enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
                [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
                feed_dict={infrared_field: ir_img})
            enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
                [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
                feed_dict={visible_field: vis_img})

            # ------------------------------------------------------------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------
            block = 0.8 * enc_vis_block_temp + 0.2 * enc_ir_block_temp
            block2 = 0.4 * enc_ir_block2_temp + 0.6 * enc_vis_block2_temp

            #first_first = Strategy(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
            #first_first = L1_norm(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
            #first_second = Strategy(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
            #first_second = L1_norm(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
            #first_third = Strategy(enc_ir_res_block_temp[2], enc_vis_res_block_temp[2])
            #first_third = L1_norm_attention(enc_ir_res_block_temp[2],feature_a, enc_vis_res_block_temp[2],feature_b)
            #first_four = Strategy(enc_ir_res_block_temp[3], enc_vis_res_block_temp[3])
            #first_four = L1_norm_attention(enc_ir_res_block_temp[3],feature_a, enc_vis_res_block_temp[3],feature_b)
            #first_first = tf.concat([first_first, tf.to_int32(first_second, name='ToInt')], 3)
            #first_first = tf.concat([first_first, tf.to_int32(first_third, name='ToInt')], 3)
            #first_first = tf.concat([first_first, first_four], 3)

            #first = first_first

            first = Strategy(enc_ir_res_block_temp[3],
                             enc_vis_res_block_temp[3])
            second = Strategy(enc_ir_res_block_temp[6],
                              enc_vis_res_block_temp[6])
            third = Strategy(enc_ir_res_block_temp[9],
                             enc_vis_res_block_temp[9])
            # ------------------------------------------------------------------------------------------------------------
            # ------------------------------------------------------------------------------------------------------------

            feature = 1 * first + 1 * second + 1 * third

            # ---------------------------------------------------------
            # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
            # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
            # ---------------------------------------------------------

            #feature = feature.eval()

            # --------- collapse the feature maps down to a single channel ---------
            #feature_map_vis_out = sess.run(tf.reduce_sum(feature_a[0], 3, keep_dims=True))
            #feature_map_ir_out = sess.run(tf.reduce_sum(feature_b[0],3, keep_dims=True))
            # ------------------------------------------------------------------

            output_image = dfn.transform_decoder(result, block, block2)

            # output = dfn.transform_decoder(feature)
            # print(type(feature))
            # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
            output = sess.run(output_image, feed_dict={result: feature})

            save_images(ir_path,
                        output,
                        output_path,
                        prefix='fused' + str(index),
                        suffix='_mix_' + str(ssim_weight))
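
Strategy is the repo's other fusion rule. Judging by its use here alongside L1_norm, it is presumably the simple additive strategy from DenseFuse; a sketch under that assumption (the name and the averaging detail are guesses):

def additive_strategy(enc_a, enc_b):
    # Hypothetical sketch: element-wise mean of the two encodings
    # (assumed behavior, not the repo's actual Strategy helper).
    return (enc_a + enc_b) / 2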
Example No. 6
def _handler_mix(ir_path,
                 vis_path,
                 model_path,
                 model_pre_path,
                 ssim_weight,
                 index,
                 output_path=None):
    mix_block = []
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    print('img shape final:', ir_img.shape)
    with tf.Graph().as_default(), tf.Session() as sess:
        infrared_field = tf.placeholder(tf.float32,
                                        shape=ir_img.shape,
                                        name='content')
        visible_field = tf.placeholder(tf.float32,
                                       shape=vis_img.shape,
                                       name='style')

        # -----------------------------------------------

        dfn = DenseFuseNet(model_pre_path)

        #sess.run(tf.global_variables_initializer())

        enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(
            infrared_field)
        enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(
            visible_field)

        result = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')

        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
            [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
            feed_dict={infrared_field: ir_img})
        enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
            [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
            feed_dict={visible_field: vis_img})

        block = L1_norm(enc_ir_block_temp, enc_vis_block_temp)
        block2 = L1_norm(enc_ir_block2_temp, enc_vis_block2_temp)

        first_first = L1_norm(enc_ir_res_block_temp[0],
                              enc_vis_res_block_temp[0])
        first_second = Strategy(enc_ir_res_block_temp[1],
                                enc_vis_res_block_temp[1])
        #first_third = L1_norm_attention(enc_ir_res_block_temp[2],feation_ir, enc_vis_res_block_temp[2],feation_vis)
        #first_four = L1_norm_attention(enc_ir_res_block_temp[3],feation_ir, enc_vis_res_block_temp[3],feation_vis)
        first_third = L1_norm(enc_ir_res_block_temp[2],
                              enc_vis_res_block_temp[2])
        first_four = Strategy(enc_ir_res_block_temp[3],
                              enc_vis_res_block_temp[3])
        first_first = tf.concat(
            [first_first, tf.to_int32(first_second, name='ToInt')], 3)
        first_first = tf.concat(
            [first_first, tf.to_int32(first_third, name='ToInt')], 3)
        first_first = tf.concat([first_first, first_four], 3)

        first = first_first

        second = L1_norm(enc_ir_res_block_temp[6], enc_vis_res_block_temp[6])
        third = L1_norm(enc_ir_res_block_temp[9], enc_vis_res_block_temp[9])

        feature = 1 * first + 0.1 * second + 0.1 * third

        #---------------------------------------------------------
        # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
        # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
        #---------------------------------------------------------

        feature = feature.eval()

        output_image = dfn.transform_decoder(result, block, block2)

        # output = dfn.transform_decoder(feature)
        # print(type(feature))
        # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
        output = sess.run(output_image, feed_dict={result: feature})

        save_images(ir_path,
                    output,
                    output_path,
                    prefix='fused' + str(index),
                    suffix='_mix_' + str(ssim_weight))
Example No. 7
def _handler_rgb_l1(images_path,
                    model_path,
                    model_pre_path,
                    index,
                    output_path=None):
    size = len(images_path)
    images = [None] * size
    ir_img1 = [None] * size
    ir_img2 = [None] * size
    ir_img3 = [None] * size
    for x in range(0, size):
        images[x] = get_train_images_rgb(images_path[x], flag=False)
        dimension = images[x].shape

        images[x] = images[x].reshape(
            [1, dimension[0], dimension[1], dimension[2]])

        images[x] = np.transpose(images[x], (0, 2, 1, 3))

        ir_img1[x] = images[x][:, :, :, 0]
        ir_img1[x] = ir_img1[x].reshape([1, dimension[0], dimension[1], 1])
        ir_img2[x] = images[x][:, :, :, 1]
        ir_img2[x] = ir_img2[x].reshape([1, dimension[0], dimension[1], 1])
        ir_img3[x] = images[x][:, :, :, 2]
        ir_img3[x] = ir_img3[x].reshape([1, dimension[0], dimension[1], 1])

    print('img shape final:', ir_img1[0].shape)

    with tf.Graph().as_default(), tf.Session() as sess:
        images_field = [None] * size
        for x in range(0, size):
            images_field[x] = tf.placeholder(tf.float32,
                                             shape=ir_img1[0].shape)

        dfn = DenseFuseNet(model_pre_path)
        enc_irs = dfn.transform_encoder(images_field)

        target = tf.placeholder(tf.float32,
                                shape=enc_irs[0].shape,
                                name='target')

        output_image = dfn.transform_decoder(target)

        # restore the trained model and run the fusion
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_ir_temps = sess.run(
            enc_irs, feed_dict={i: d
                                for i, d in zip(images_field, ir_img1)})
        feature = L1_norm(enc_ir_temps)
        output1 = sess.run(output_image, feed_dict={target: feature})

        enc_ir_temps = sess.run(
            enc_irs, feed_dict={i: d
                                for i, d in zip(images_field, ir_img2)})
        feature = L1_norm(enc_ir_temps)
        output2 = sess.run(output_image, feed_dict={target: feature})

        enc_ir_temps = sess.run(
            enc_irs, feed_dict={i: d
                                for i, d in zip(images_field, ir_img3)})
        feature = L1_norm(enc_ir_temps)
        output3 = sess.run(output_image, feed_dict={target: feature})

        output1 = output1.reshape([1, dimension[0], dimension[1]])
        output2 = output2.reshape([1, dimension[0], dimension[1]])
        output3 = output3.reshape([1, dimension[0], dimension[1]])

        output = np.stack((output1, output2, output3), axis=-1)
        output = np.transpose(output, (0, 2, 1, 3))
        save_images(images_path,
                    output,
                    output_path,
                    prefix='fused' + str(index),
                    suffix='_densefuse_l1norm')
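
An illustrative call with two source images (paths and index are placeholders, not values from the repository):

_handler_rgb_l1(['images/A.png', 'images/B.png'],
                model_path='./models/densefuse.ckpt',  # placeholder path
                model_pre_path=None,
                index=1,
                output_path='./outputs/')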