Example #1
    def test(self, restore_model, save_dir, is_normalize_img=True, prefix=''):
        dataset = BasicDataset(data_list_file=self.dataset_config['data_list_file'], img_dir=self.dataset_config['img_dir'] + prefix, is_normalize_img=is_normalize_img)
        save_name_list = dataset.data_list[:, -1]
        iterator = dataset.create_one_shot_iterator(dataset.data_list, num_parallel_calls=self.num_input_threads)
        batch_img0, batch_img1, batch_img2 = iterator.get_next()
        img_shape = tf.shape(batch_img0)
        h = img_shape[1]
        w = img_shape[2]
        
        # Round h and w up to the next multiple of 64; the 6-level feature
        # pyramid halves the resolution at each level, so inputs must divide by 64.
        new_h = tf.where(tf.equal(tf.mod(h, 64), 0), h, (tf.to_int32(tf.floor(h / 64) + 1)) * 64)
        new_w = tf.where(tf.equal(tf.mod(w, 64), 0), w, (tf.to_int32(tf.floor(w / 64) + 1)) * 64)
        
        # method=1 selects nearest-neighbour resizing in TF1's ResizeMethod enum.
        batch_img0 = tf.image.resize_images(batch_img0, [new_h, new_w], method=1, align_corners=True)
        batch_img1 = tf.image.resize_images(batch_img1, [new_h, new_w], method=1, align_corners=True)
        batch_img2 = tf.image.resize_images(batch_img2, [new_h, new_w], method=1, align_corners=True)
        
        flow_fw, flow_bw = pyramid_processing(batch_img0, batch_img1, batch_img2, train=False, trainable=False, is_scale=True) 
        flow_fw['full_res'] = flow_resize(flow_fw['full_res'], [h, w], method=1)
        flow_bw['full_res'] = flow_resize(flow_bw['full_res'], [h, w], method=1)
        
        flow_fw_color = flow_to_color(flow_fw['full_res'], mask=None, max_flow=256)
        flow_bw_color = flow_to_color(flow_bw['full_res'], mask=None, max_flow=256)
        
        restore_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) 
        saver = tf.train.Saver(var_list=restore_vars)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer()) 
        sess.run(iterator.initializer) 
        saver.restore(sess, restore_model)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)           
        for i in range(dataset.data_num):
            np_flow_fw, np_flow_bw, np_flow_fw_color, np_flow_bw_color = sess.run([flow_fw['full_res'], flow_bw['full_res'], flow_fw_color, flow_bw_color])
            misc.imsave('%s/%s.png' % (save_dir, save_name_list[i]), np_flow_fw_color[0])
            # misc.imsave(('%s/' + prefix + '_%s.png') % (save_dir, save_name_list[i]), np_flow_fw_color[0])
            # misc.imsave(('%s/' + prefix + '_flow_fw_color_%s.png') % (save_dir, save_name_list[i]), np_flow_fw_color[0])
            # misc.imsave(('%s/' + prefix + '_flow_bw_color_%s.png') % (save_dir, save_name_list[i]), np_flow_bw_color[0])
            # write_flo('%s/flow_fw_%s.flo' % (save_dir, save_name_list[i]), np_flow_fw[0])
            # write_flo('%s/flow_bw_%s.flo' % (save_dir, save_name_list[i]), np_flow_bw[0])
            print('Finish %d/%d' % (i+1, dataset.data_num))    
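A note on the size arithmetic: the tf.where expressions above round h and w up to the next multiple of 64. A minimal sketch of the same logic in plain Python (pad_to_multiple is an illustrative name, not from the original):

def pad_to_multiple(size, multiple=64):
    # Keep size if it already divides evenly, otherwise round up.
    return size if size % multiple == 0 else (size // multiple + 1) * multiple

assert pad_to_multiple(128) == 128    # already a multiple of 64
assert pad_to_multiple(375) == 384    # KITTI-like height, rounded up
assert pad_to_multiple(1242) == 1280  # KITTI-like width, rounded up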
Example #2
def _pyramid_processing(x1_feature, x2_feature, img_size, train=True, trainable=True, reuse=None, regularizer=None, is_scale=True):
    x_shape = tf.shape(x1_feature['conv6_2'])
    # Start the coarse-to-fine estimation from a zero flow at the coarsest level.
    initial_flow = tf.zeros([x_shape[0], x_shape[1], x_shape[2], 2], dtype=tf.float32, name='initial_flow')
    flow_estimated = {}
    flow_estimated['level_6'] = estimator(x1_feature['conv6_2'], x2_feature['conv6_2'], 
        initial_flow, train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='estimator_level_6')['conv6']
    
    for i in range(4):
        feature_name = 'conv%d_2' % (5-i)
        feature_size = tf.shape(x1_feature[feature_name])[1:3]
        # Upsample the coarser flow and use it to initialize the next level.
        initial_flow = flow_resize(flow_estimated['level_%d' % (6-i)], feature_size, is_scale=is_scale)
        estimator_net = estimator(x1_feature[feature_name], x2_feature[feature_name], 
            initial_flow, train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='estimator_level_%d' % (5-i))
        flow_estimated['level_%d' % (5-i)] = estimator_net['conv6']
    
    # After the loop, estimator_net holds the finest (level-2) estimator outputs.
    x_feature = estimator_net['conv5']
    flow_estimated['refined'] = context_network(x_feature, flow_estimated['level_2'], train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='context_network')
    flow_estimated['full_res'] = flow_resize(flow_estimated['refined'], img_size, is_scale=is_scale)     
        
    return flow_estimated   
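flow_resize is imported from the repository's utilities and is used throughout to move flow fields between pyramid levels. A minimal sketch of what such a helper typically does when is_scale=True (resize the field, then rescale the vectors by the size ratio); the channel order (u, v) and the exact signature are assumptions, not recovered from the original:

def flow_resize_sketch(flow, out_size, is_scale=True, method=0):
    # flow: [N, H, W, 2]; out_size: [new_H, new_W].
    in_size = tf.shape(flow)[1:3]
    flow = tf.image.resize_images(flow, out_size, method=method, align_corners=True)
    if is_scale:
        # Flow vectors are measured in pixels, so they must be scaled by the
        # same ratio as the spatial dimensions they were resized with.
        scale_w = tf.to_float(out_size[1]) / tf.to_float(in_size[1])
        scale_h = tf.to_float(out_size[0]) / tf.to_float(in_size[0])
        flow = tf.stack([flow[:, :, :, 0] * scale_w,
                         flow[:, :, :, 1] * scale_h], axis=-1)
    return flow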
Example #3
def pyramid_processing_three_frame(batch_img, x0_feature, x1_feature, x2_feature, train=True, trainable=True, reuse=None, regularizer=None, is_scale=True):
    x_shape = tf.shape(x1_feature['conv6_2'])
    initial_flow_fw = tf.zeros([x_shape[0], x_shape[1], x_shape[2], 2], dtype=tf.float32, name='initial_flow_fw')
    initial_flow_bw = tf.zeros([x_shape[0], x_shape[1], x_shape[2], 2], dtype=tf.float32, name='initial_flow_bw')
    flow_fw = {}
    flow_bw = {}
    net_fw, net_bw = estimator(x0_feature['conv6_2'], x1_feature['conv6_2'], x2_feature['conv6_2'], 
        initial_flow_fw, initial_flow_bw, train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='estimator_level_6')
    flow_fw['level_6'] = net_fw['conv6']
    flow_bw['level_6'] = net_bw['conv6']
    
    
    for i in range(4):
        feature_name = 'conv%d_2' % (5-i)
        level = 'level_%d' % (5-i)
        feature_size = tf.shape(x1_feature[feature_name])[1:3]
        initial_flow_fw = flow_resize(flow_fw['level_%d' % (6-i)], feature_size, is_scale=is_scale)
        initial_flow_bw = flow_resize(flow_bw['level_%d' % (6-i)], feature_size, is_scale=is_scale)
        net_fw, net_bw = estimator(x0_feature[feature_name], x1_feature[feature_name], x2_feature[feature_name], 
            initial_flow_fw, initial_flow_bw, train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='estimator_level_%d' % (5-i))           
        flow_fw[level] = net_fw['conv6']
        flow_bw[level] = net_bw['conv6'] 
    
    # Each direction is refined jointly with the negated opposite-direction flow.
    flow_concat_fw = tf.concat([flow_fw['level_2'], -flow_bw['level_2']], -1)
    flow_concat_bw = tf.concat([flow_bw['level_2'], -flow_fw['level_2']], -1)       
    
    x_feature = tf.concat([net_fw['conv5'], net_bw['conv5']], axis=-1)
    flow_fw['refined'] = context_network(x_feature, flow_concat_fw, train=train, trainable=trainable, reuse=reuse, regularizer=regularizer, name='context_network')
    flow_size = tf.shape(batch_img)[1:3]
    flow_fw['full_res'] = flow_resize(flow_fw['refined'], flow_size, is_scale=is_scale) 
    
    x_feature = tf.concat([net_bw['conv5'], net_fw['conv5']], axis=-1)
    # reuse=True: the backward direction shares the context network's weights.
    flow_bw['refined'] = context_network(x_feature, flow_concat_bw, train=train, trainable=trainable, reuse=True, regularizer=regularizer, name='context_network')
    flow_bw['full_res'] = flow_resize(flow_bw['refined'], flow_size, is_scale=is_scale)  
    
    return flow_fw, flow_bw
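flow_to_color, used in the test snippets, renders a flow field as an RGB image. A minimal NumPy sketch of the usual HSV encoding (direction as hue, magnitude as saturation); the repository's implementation and its handling of max_flow may differ:

import numpy as np
from matplotlib.colors import hsv_to_rgb

def flow_to_color_sketch(flow, max_flow=256):
    # flow: [H, W, 2] with (u, v) displacements in pixels.
    u, v = flow[..., 0], flow[..., 1]
    magnitude = np.minimum(np.sqrt(u ** 2 + v ** 2), max_flow)
    angle = np.arctan2(-v, -u) / np.pi            # direction in [-1, 1]
    hsv = np.stack([(angle + 1.0) / 2.0,          # hue encodes direction
                    magnitude / max_flow,         # saturation encodes magnitude
                    np.ones_like(magnitude)],     # constant brightness
                   axis=-1)
    return (hsv_to_rgb(hsv) * 255).astype(np.uint8)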
Example #4

batch_img0 = tf.image.resize_images(b_img0, [new_h, new_w],
                                    method=1,
                                    align_corners=True)
batch_img1 = tf.image.resize_images(b_img1, [new_h, new_w],
                                    method=1,
                                    align_corners=True)
batch_img2 = tf.image.resize_images(b_img2, [new_h, new_w],
                                    method=1,
                                    align_corners=True)

flow_fw, flow_bw = pyramid_processing(batch_img0,
                                      batch_img1,
                                      batch_img2,
                                      train=False,
                                      trainable=False,
                                      is_scale=True)
flow_fw['full_res'] = flow_resize(flow_fw['full_res'], [h, w], method=1)
flow_bw['full_res'] = flow_resize(flow_bw['full_res'], [h, w], method=1)

flow_fw_color = flow_to_color(flow_fw['full_res'], mask=None, max_flow=256)
flow_bw_color = flow_to_color(flow_bw['full_res'], mask=None, max_flow=256)

restore_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
saver = tf.train.Saver(var_list=restore_vars)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver.restore(sess, restore_model)


def fb_check(w_warp, w_back):  # optimizable? CUDA?
    weights = np.ones(w_warp.shape[:2])
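The fb_check snippet above is truncated in the source. For orientation, a minimal sketch of a forward-backward consistency check in the same spirit; the threshold constants alpha1 and alpha2 and the zero/one weighting are illustrative assumptions, not recovered from the original:

import numpy as np

def fb_check_sketch(w_warp, w_back, alpha1=0.01, alpha2=0.5):
    # w_warp: forward flow [H, W, 2]; w_back: backward flow warped into the
    # first frame [H, W, 2]. A consistent pixel should satisfy
    # w_warp + w_back ~ 0; a large round-trip error suggests occlusion.
    weights = np.ones(w_warp.shape[:2])
    fb_err = np.sum((w_warp + w_back) ** 2, axis=-1)
    mag = np.sum(w_warp ** 2, axis=-1) + np.sum(w_back ** 2, axis=-1)
    weights[fb_err > alpha1 * mag + alpha2] = 0.0  # mask inconsistent pixels
    return weights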
Example #5
    def eval(self, restore_model, save_dir, is_normalize_img=True):
        from test_datasets_eval import BasicDataset
        from error_metrics import flow_error_avg, outlier_pct, merge_dictionaries
        dataset = BasicDataset(data_list_file=self.dataset_config['data_list_file'], img_dir=self.dataset_config['img_dir'], is_normalize_img=is_normalize_img)
        save_name_list = dataset.data_list[:, -1]
        iterator = dataset.create_one_shot_iterator(dataset.data_list, num_parallel_calls=self.num_input_threads)
        batch_img0, batch_img1, batch_img2, flow_noc, flow_occ, mask_noc, mask_occ = iterator.get_next()
        img_shape = tf.shape(batch_img0)
        h = img_shape[1]
        w = img_shape[2]
        
        # Round h and w up to the next multiple of 64, as in test() above.
        new_h = tf.where(tf.equal(tf.mod(h, 64), 0), h, (tf.to_int32(tf.floor(h / 64) + 1)) * 64)
        new_w = tf.where(tf.equal(tf.mod(w, 64), 0), w, (tf.to_int32(tf.floor(w / 64) + 1)) * 64)
        
        batch_img0 = tf.image.resize_images(batch_img0, [new_h, new_w], method=1, align_corners=True)
        batch_img1 = tf.image.resize_images(batch_img1, [new_h, new_w], method=1, align_corners=True)
        batch_img2 = tf.image.resize_images(batch_img2, [new_h, new_w], method=1, align_corners=True)
        
        flow_fw, _ = pyramid_processing(batch_img0, batch_img1, batch_img2, train=False, trainable=False, is_scale=True) 
        flow_fw['full_res'] = flow_resize(flow_fw['full_res'], [h, w], method=1)       
        flow_fw_color = flow_to_color(flow_fw['full_res'], mask=None, max_flow=256)
        error_fw_color = flow_error_image(flow_fw['full_res'], flow_occ, mask_occ)
        errors = {}
        errors['EPE_noc'] = flow_error_avg(flow_noc, flow_fw['full_res'], mask_noc)
        errors['EPE_all'] = flow_error_avg(flow_occ, flow_fw['full_res'], mask_occ)
        errors['outliers_noc'] = outlier_pct(flow_noc, flow_fw['full_res'], mask_noc)
        errors['outliers_all'] = outlier_pct(flow_occ, flow_fw['full_res'], mask_occ)
        restore_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) 
        saver = tf.train.Saver(var_list=restore_vars)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer()) 
        sess.run(iterator.initializer) 
        saver.restore(sess, restore_model)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        sum_EPE_noc = 0.
        sum_EPE_all = 0.
        sum_outliers_noc = 0.
        sum_outliers_all = 0.
        for i in range(dataset.data_num):
            # Fetch flows, visualizations and error metrics in a single sess.run;
            # two separate calls would each pull a new batch from the iterator,
            # so the metrics would be computed on a different image than the one saved.
            np_flow_fw, np_flow_fw_color, np_error_fw_color, EPE_noc, EPE_all, outliers_noc, outliers_all = \
                sess.run([flow_fw['full_res'], flow_fw_color, error_fw_color,
                          errors['EPE_noc'], errors['EPE_all'], errors['outliers_noc'], errors['outliers_all']])
            sum_EPE_noc += EPE_noc
            sum_EPE_all += EPE_all
            sum_outliers_noc += outliers_noc
            sum_outliers_all += outliers_all

            misc.imsave('%s/%s_10.png' % (save_dir, save_name_list[i]), np_flow_fw_color[0])
            misc.imsave('%s/error_%s.png' % (save_dir, save_name_list[i]), np_error_fw_color[0])
            # write_flo('%s/flow_fw_%s.flo' % (save_dir, save_name_list[i]), np_flow_fw[0])
            print('Finish %d/%d' % (i+1, dataset.data_num))

        print("EPE_noc: %f, EPE_all: %f" % (sum_EPE_noc/dataset.data_num, sum_EPE_all/dataset.data_num))
        print("F1_noc: %f, F1_all: %f" % (sum_outliers_noc/dataset.data_num, sum_outliers_all/dataset.data_num))
Example #6
def compute_losses(flow_gt, flow_estimated, mask, is_scale=True):
    '''
        flow_estimated is a dict containing the flows estimated at each pyramid level.
    '''
    flow_gt_level = {}
    mask_level = {}

    for i in range(2, 7):
        level = 'level_%d' % i
        flow_size = tf.shape(flow_estimated[level])[1:3]
        flow_gt_level[level] = flow_resize(flow_gt,
                                           flow_size,
                                           is_scale=is_scale)
        mask_level[level] = tf.image.resize_images(mask,
                                                   flow_size,
                                                   method=1,
                                                   align_corners=True)

    losses = {}
    # --------------------------------- epe ----------------------------------
    epe_mean = {}
    epe_sum = {}
    epe_mean['full_res'], epe_sum['full_res'] = epe_loss(
        flow_gt - flow_estimated['full_res'], mask)
    epe_mean['refined'], epe_sum['refined'] = epe_loss(
        flow_gt_level['level_2'] - flow_estimated['refined'],
        mask_level['level_2'])
    for i in range(2, 7):
        level = 'level_%d' % i
        epe_mean[level], epe_sum[level] = epe_loss(
            flow_gt_level[level] - flow_estimated[level], mask_level[level])
    # Weighted multi-scale sum: coarser levels receive larger weights.
    epe_sum['total'] = 0.0025*epe_sum['full_res'] + 0.005*epe_sum['level_2'] + 0.01*epe_sum['level_3'] + \
        0.02*epe_sum['level_4'] + 0.08*epe_sum['level_5'] + 0.32*epe_sum['level_6']
    losses['epe_mean'] = epe_mean
    losses['epe_sum'] = epe_sum

    # --------------------------------- mse ----------------------------------
    mse_mean = {}
    mse_sum = {}
    mse_mean['full_res'], mse_sum['full_res'] = mse_loss(
        flow_gt - flow_estimated['full_res'], mask)
    mse_mean['refined'], mse_sum['refined'] = mse_loss(
        flow_gt_level['level_2'] - flow_estimated['refined'],
        mask_level['level_2'])
    for i in range(2, 7):
        level = 'level_%d' % i
        mse_mean[level], mse_sum[level] = mse_loss(
            flow_gt_level[level] - flow_estimated[level], mask_level[level])
    mse_sum['total'] = 0.0025*mse_sum['full_res'] + 0.005*mse_sum['level_2'] + 0.01*mse_sum['level_3'] + \
        0.02*mse_sum['level_4'] + 0.08*mse_sum['level_5'] + 0.32*mse_sum['level_6']
    losses['mse_mean'] = mse_mean
    losses['mse_sum'] = mse_sum

    # --------------------------------- abs ----------------------------------
    abs_mean = {}
    abs_sum = {}
    abs_mean['full_res'], abs_sum['full_res'] = abs_loss(
        flow_gt - flow_estimated['full_res'], mask)
    abs_mean['refined'], abs_sum['refined'] = abs_loss(
        flow_gt_level['level_2'] - flow_estimated['refined'],
        mask_level['level_2'])
    for i in range(2, 7):
        level = 'level_%d' % i
        abs_mean[level], abs_sum[level] = abs_loss(
            flow_gt_level[level] - flow_estimated[level], mask_level[level])
    abs_sum['total'] = 0.0025*abs_sum['full_res'] + 0.005*abs_sum['level_2'] + 0.01*abs_sum['level_3'] + \
        0.02*abs_sum['level_4'] + 0.08*abs_sum['level_5'] + 0.32*abs_sum['level_6']
    losses['abs_mean'] = abs_mean
    losses['abs_sum'] = abs_sum

    # -------------------------------- robust ---------------------------------
    # robust_mean = {}
    # robust_sum = {}
    # robust_mean['full_res'], robust_sum['full_res'] = robust_loss(flow_gt - flow_estimated['full_res'], mask)
    # robust_mean['refined'], robust_sum['refined'] = robust_loss(flow_gt_level['level_2'] - flow_estimated['refined'], mask_level['level_2'])
    # for i in range(2, 7):
    #     level = 'level_%d' % i
    #     robust_mean[level], robust_sum[level] = robust_loss(flow_gt_level[level] - flow_estimated[level], mask_level[level])
    # robust_sum['total'] = 0.005*robust_sum['refined'] + 0.005*robust_sum['level_2'] + 0.01*robust_sum['level_3'] + \
    #     0.02*robust_sum['level_4'] + 0.08*robust_sum['level_5'] + 0.32*robust_sum['level_6']
    # losses['robust_mean'] = robust_mean
    # losses['robust_sum'] = robust_sum

    # ------------------------------ abs_robust --------------------------------
    abs_robust_mean = {}
    abs_robust_sum = {}
    abs_robust_mean['full_res'], abs_robust_sum['full_res'] = abs_robust_loss(
        flow_gt - flow_estimated['full_res'], mask)
    abs_robust_mean['refined'], abs_robust_sum['refined'] = abs_robust_loss(
        flow_gt_level['level_2'] - flow_estimated['refined'],
        mask_level['level_2'])
    for i in range(2, 7):
        level = 'level_%d' % i
        abs_robust_mean[level], abs_robust_sum[level] = abs_robust_loss(
            flow_gt_level[level] - flow_estimated[level], mask_level[level])
    abs_robust_sum['total'] = 0.0025*abs_robust_sum['full_res'] + 0.005*abs_robust_sum['level_2'] + 0.01*abs_robust_sum['level_3'] + \
        0.02*abs_robust_sum['level_4'] + 0.08*abs_robust_sum['level_5'] + 0.32*abs_robust_sum['level_6']
    losses['abs_robust_mean'] = abs_robust_mean
    losses['abs_robust_sum'] = abs_robust_sum

    return losses
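The loss helpers epe_loss, mse_loss, abs_loss and abs_robust_loss are imported elsewhere in the repository; each returns an (error_mean, error_sum) pair. A minimal sketch of a masked robust penalty in the common generalized-Charbonnier form (|x| + eps)^q; the constants q=0.4 and eps=0.01 are typical choices, not necessarily the repository's:

def abs_robust_loss_sketch(diff, mask, q=0.4, eps=0.01):
    # diff: [N, H, W, 2] flow residual; mask: [N, H, W, 1] validity mask.
    # The sub-linear exponent q < 1 down-weights large residuals (outliers).
    error = tf.pow(tf.abs(diff) + eps, q)
    error = tf.reduce_sum(error, axis=-1, keep_dims=True) * mask
    error_sum = tf.reduce_sum(error)
    error_mean = error_sum / (tf.reduce_sum(mask) + 1e-6)
    return error_mean, error_sum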