Example #1
def cne_interface(sfm_cfg):
    """Entry point to refine matches with CNe.
    
    Parameters
    ----------
    sfm_cfg: Namespace
        Configuration for the SfM pipeline.
    """

    # Get data
    data, key_list = make_xy(sfm_cfg)
    data_dict = {}
    data_dict['test'] = data

    # Construct cne config
    cne_cfg = get_cne_config()

    # Init network
    mynet = MyNetwork(cne_cfg)

    # Run CNe
    t_start = time()
    mask_dict = mynet.test(data_dict)
    elapsed = time() - t_start

    # Save CNe timings (average cost per image pair)
    cost = elapsed / len(key_list)
    save_h5({'cost': cost}, get_filter_cost_file(sfm_cfg))
    print('CNe cost (averaged over image pairs): {:0.2f} sec'.format(cost))

    # Extract match mask
    save_match_inlier(sfm_cfg, key_list, mask_dict)
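
save_h5 and load_h5 are used throughout these examples but are not shown. A minimal sketch of what such helpers might do with h5py, assuming flat string keys and array-like values (the actual implementations may differ):

import h5py


def save_h5(dict_to_save, filename):
    # Minimal sketch: write each dictionary entry as one HDF5 dataset.
    with h5py.File(filename, 'w') as f:
        for key, value in dict_to_save.items():
            f.create_dataset(key, data=value)


def load_h5(filename):
    # Minimal sketch: read every dataset back into a plain dict.
    with h5py.File(filename, 'r') as f:
        return {key: f[key][()] for key in f.keys()}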
Example #2
def compute_pose_error(cfg):
    '''
    Compute rotation (quaternion) and translation errors for COLMAP reconstructions.
    '''

    if os.path.exists(get_colmap_pose_file(cfg)):
        print(' -- already exists, skipping COLMAP eval')
        return

    # Load visibility and images
    image_path_list = get_colmap_image_path_list(cfg)
    subset_index = get_colmap_image_subset_index(cfg, image_path_list)
    image_name_list = get_item_name_list(image_path_list)

    # Load camera information
    data_dir = get_data_path(cfg)
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list, subset_index)

    # Generate all possible pairs from all images
    pair_list = []
    for ii in range(len(image_path_list)):
        for jj in range(ii + 1, len(image_path_list)):
            pair_list.append([ii, jj])

    # Check if COLMAP results exist. Otherwise, every pair in this bag fails.
    colmap_output_path = get_colmap_output_path(cfg)
    is_colmap_valid = os.path.exists(os.path.join(colmap_output_path, '0'))

    if is_colmap_valid:

        # Find the best colmap reconstruction
        best_index = get_best_colmap_index(cfg)

        print('Computing pose errors')
        #num_cores = int(multiprocessing.cpu_count() * 0.9)
        num_cores = int(len(os.sched_getaffinity(0)) * 0.9)
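        # Note: os.sched_getaffinity(0) counts only the CPUs this process is
        # allowed to run on (respecting taskset/cgroup limits), whereas
        # multiprocessing.cpu_count() reports every CPU in the system.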
        result = Parallel(n_jobs=num_cores)(
            delayed(compute_stereo_metrics_from_colmap)(
                image_path_list[pair[0]], image_path_list[pair[1]],
                calib_dict[image_name_list[pair[0]]],
                calib_dict[image_name_list[pair[1]]], best_index, cfg)
            for pair in tqdm(pair_list))

    # Collect err_q, err_t from results
    err_dict = {}
    for i, pair in enumerate(pair_list):
        if is_colmap_valid:
            err_q, err_t = result[i][0], result[i][1]
        else:
            err_q, err_t = np.inf, np.inf
        err_dict[image_name_list[pair[0]] + '-' +
                 image_name_list[pair[1]]] = [err_q, err_t]

    # Finally, save packed errors
    save_h5(err_dict, get_colmap_pose_file(cfg))
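
The nested ii/jj loop above enumerates every unordered index pair; the same list can be built with itertools.combinations:

from itertools import combinations

# Equivalent to the nested loop above: all unordered pairs of image indices.
pair_list = [list(p) for p in combinations(range(len(image_path_list)), 2)]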
Example #3
def main(cfg):
    '''Main function to compute matches.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    if os.path.exists(get_match_file(cfg)):
        print(' -- already exists, skipping match computation')
        return

    # Get data directory
    data_dir = get_data_path(cfg)

    # Load pre-computed pairs with the new visibility criteria
    print('Reading list of all possible pairs')
    pairs = get_pairs_per_threshold(data_dir)['0.0']
    print('{} pre-computed pairs'.format(len(pairs)))

    # Load descriptors
    descriptors_dict = load_h5(get_desc_file(cfg))
    keypoints_dict = load_h5(get_kp_file(cfg))

    # Feature Matching
    print('Computing matches')
    num_cores = cfg.num_opencv_threads if cfg.num_opencv_threads > 0 else int(
        len(os.sched_getaffinity(0)) * 0.9)
    if WITH_FAISS:
        num_cores = min(4, num_cores)
    result = Parallel(n_jobs=num_cores)(
        delayed(compute_matches)(
            np.asarray(descriptors_dict[pair.split('-')[0]]),
            np.asarray(descriptors_dict[pair.split('-')[1]]), cfg,
            np.asarray(keypoints_dict[pair.split('-')[0]]),
            np.asarray(keypoints_dict[pair.split('-')[1]]))
        for pair in tqdm(pairs))

    # Make match dictionary
    matches_dict = {}
    timings_list = []
    for i, pair in enumerate(pairs):
        matches_dict[pair] = result[i][0]
        timings_list.append(result[i][1])

    # Create the match directory if needed
    if not os.path.exists(get_match_path(cfg)):
        os.makedirs(get_match_path(cfg))

    # Finally save packed matches
    save_h5(matches_dict, get_match_file(cfg))

    # Save computational cost
    save_h5({'cost': np.mean(timings_list)}, get_match_cost_file(cfg))
    print('Matching cost (averaged over image pairs): {:0.2f} sec'.format(
        np.mean(timings_list)))
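
compute_matches itself is not shown in these examples. A minimal sketch of what such a function might do, assuming float descriptors and OpenCV's brute-force matcher with Lowe's ratio test (ratio_th is an illustrative parameter); the real implementation also consults cfg and the keypoints, and may use FAISS:

import time

import cv2
import numpy as np


def compute_matches(descs0, descs1, cfg, kps0=None, kps1=None, ratio_th=0.8):
    # Hypothetical sketch: ratio-test matching; returns (matches, elapsed).
    # Assumes both images yield at least two descriptors each.
    t_start = time.time()
    bf = cv2.BFMatcher(cv2.NORM_L2)
    knn = bf.knnMatch(descs0.astype(np.float32),
                      descs1.astype(np.float32), k=2)
    good = [m for m, n in knn if m.distance < ratio_th * n.distance]
    # 2 x N array of (query, train) keypoint indices, matching the layout
    # that save_match_inlier slices in Example #4.
    matches = np.array([[m.queryIdx for m in good],
                        [m.trainIdx for m in good]])
    return matches, time.time() - t_start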
Example #4
def save_match_inlier(sfm_cfg, key_list, mask_dict):
    match_dict = load_h5(get_match_file(sfm_cfg))

    if len(match_dict) != len(mask_dict):
        raise RuntimeError('Number of pairs from CNe output is different '
                           'from original data!')

    for key, match_mask in mask_dict.items():
        mask_index = np.where(match_mask)
        match_idx_pairs_inlier = match_dict[key_list[key]][:, mask_index]
        match_dict[key_list[key]] = np.squeeze(match_idx_pairs_inlier)

    save_h5(match_dict, get_filter_match_file(sfm_cfg))
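
A toy run of the masking step above, showing why the final np.squeeze is needed: indexing a 2 x N array of match index pairs with the tuple returned by np.where keeps an extra singleton axis.

import numpy as np

matches = np.array([[0, 1, 2, 3],
                    [5, 6, 7, 8]])           # 2 x N match index pairs
mask = np.array([1, 0, 1, 1], dtype=bool)    # one inlier flag per match

mask_index = np.where(mask)                  # (array([0, 2, 3]),)
kept = matches[:, mask_index]                # shape (2, 1, 3): extra axis
kept = np.squeeze(kept)                      # shape (2, 3), as expected
print(kept)                                  # [[0 2 3], [5 7 8]]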
Example #5
def main(cfg):
    '''Main function to compute model.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    if os.path.exists(get_geom_file(cfg)):
        print(' -- already exists, skipping model computation')
        return

    # Load keypoints
    keypoints_dict = load_h5(get_kp_file(cfg))

    # Load matches
    matches_dict = load_h5(get_filter_match_file_for_computing_model(cfg))

    # Model computation
    print('Computing model')
    num_cores = cfg.num_opencv_threads if cfg.num_opencv_threads > 0 else int(
        len(os.sched_getaffinity(0)) * 0.9)
    # Load camera information
    data_dir = get_data_path(cfg)
    images_list = get_fullpath_list(data_dir, 'images')
    image_names = get_item_name_list(images_list)

    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list)
    pairs_per_th = get_pairs_per_threshold(data_dir)

    # Load descriptors, affine shapes, orientations and scales, if available;
    # fall back to empty lists for methods that do not provide them.
    desc_dict = defaultdict(list)
    try:
        for k, v in load_h5(get_desc_file(cfg)).items():
            desc_dict[k] = v
    except Exception:
        pass

    aff_dict = defaultdict(list)
    try:
        for k, v in load_h5(get_affine_file(cfg)).items():
            aff_dict[k] = v
    except Exception:
        pass

    ori_dict = defaultdict(list)
    try:
        for k, v in load_h5(get_angle_file(cfg)).items():
            ori_dict[k] = v
    except Exception:
        pass

    scale_dict = defaultdict(list)
    try:
        for k, v in load_h5(get_scale_file(cfg)).items():
            scale_dict[k] = v
    except Exception:
        pass

    pairs = pairs_per_th['0.0']
    random.shuffle(pairs)
    names = [pair.split('-') for pair in pairs]
    result = Parallel(n_jobs=num_cores)(
        delayed(compute_model)(
            cfg, np.asarray(matches_dict[pair]),
            np.asarray(keypoints_dict[n0]), np.asarray(keypoints_dict[n1]),
            calib_dict[n0], calib_dict[n1],
            images_list[image_names.index(n0)],
            images_list[image_names.index(n1)],
            np.asarray(scale_dict[n0]), np.asarray(scale_dict[n1]),
            np.asarray(ori_dict[n0]), np.asarray(ori_dict[n1]),
            np.asarray(aff_dict[n0]), np.asarray(aff_dict[n1]),
            np.asarray(desc_dict[n0]), np.asarray(desc_dict[n1]))
        for pair, (n0, n1) in tqdm(zip(pairs, names), total=len(pairs)))

    # Make model dictionary
    model_dict = {}
    inl_dict = {}
    timings_list = []
    for i, pair in enumerate(pairs):
        model_dict[pair] = result[i][0]
        inl_dict[pair] = result[i][1]
        timings_list.append(result[i][2])

    # Create the model directory if needed
    if not os.path.exists(get_geom_path(cfg)):
        os.makedirs(get_geom_path(cfg))

    # Finally save packed models
    save_h5(model_dict, get_geom_file(cfg))
    save_h5(inl_dict, get_geom_inl_file(cfg))

    # Save computational cost
    save_h5({'cost': np.mean(timings_list)}, get_geom_cost_file(cfg))
    print('Geometry cost (averaged over image pairs): {:0.2f} sec'.format(
        np.mean(timings_list)))
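
The four try/except blocks above share one pattern; a small hypothetical helper could capture it (not part of the original code):

from collections import defaultdict


def load_h5_or_empty(path):
    # Load an HDF5 file into a defaultdict(list); empty if unavailable.
    out = defaultdict(list)
    try:
        for k, v in load_h5(path).items():
            out[k] = v
    except Exception:
        pass
    return out


# Usage sketch:
# desc_dict = load_h5_or_empty(get_desc_file(cfg))
# aff_dict = load_h5_or_empty(get_affine_file(cfg))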
Example #6
def main(cfg):
    '''Main function to compute features.

    Parameters
    ----------
    cfg: Namespace
        Configuration
    '''

    if os.path.exists(get_kp_file(cfg)) and os.path.exists(get_desc_file(cfg)):
        print(' -- already exists, skipping feature extraction')
        return

    # Get data directory
    data_dir = get_data_path(cfg)

    # Get list of all images and visibility files in the 'set_100'
    images_list = get_fullpath_list(data_dir, 'images')

    # Also create a list containing only the image names, to be used as
    # dictionary keys later
    image_names = get_item_name_list(images_list)

    # Create folder
    save_dir = get_feature_path(cfg)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Compute and save keypoints and descriptors
    #
    # Parallel processing actually slows things down here, because OpenCV
    # already uses multiple threads internally, so we simply process the
    # images one by one for now
    print('Extracting Keypoints and Descriptors:')
    result = []
    for img_path in tqdm(images_list):
        result.append(compute_per_img_file(img_path, cfg))

    # num_cores = int(multiprocessing.cpu_count() * 0.9)
    # print('Extracting Keypoints and Descriptors:')
    # result = Parallel(n_jobs=num_cores)(delayed(compute_per_img_file)(
    #     img_path, cfg) for img_path in tqdm(images_list))

    # Save keypoints and descriptors
    kp_dict = {}
    scale_dict = {}
    angle_dict = {}
    score_dict = {}
    descs_dict = {}
    affine_dict = {}
    for name, res in zip(image_names, result):
        assert 'kp' in res, 'Must provide keypoints'
        assert 'descs' in res, 'Must provide descriptors'
        kp_dict[name] = res['kp']
        if 'scale' in res:
            scale_dict[name] = res['scale']
        if 'angle' in res:
            angle_dict[name] = res['angle']
        if 'affine' in res:
            affine_dict[name] = res['affine']
        if 'score' in res:
            score_dict[name] = res['score']
        descs_dict[name] = res['descs']

    # Finally, save packed keypoints and descriptors
    save_h5(kp_dict, get_kp_file(cfg))
    save_h5(scale_dict, get_scale_file(cfg))
    save_h5(angle_dict, get_angle_file(cfg))
    save_h5(score_dict, get_score_file(cfg))
    save_h5(descs_dict, get_desc_file(cfg))
    save_h5(affine_dict, get_affine_file(cfg))
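
compute_per_img_file is not shown in these examples. A minimal sketch of one possible implementation, using OpenCV SIFT and returning the dictionary keys the loop above expects; this is an illustration, not the benchmark's actual extractor:

import cv2
import numpy as np


def compute_per_img_file(img_path, cfg):
    # Hypothetical sketch: SIFT keypoints and descriptors for one image.
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    sift = cv2.SIFT_create()
    kps, descs = sift.detectAndCompute(img, None)
    return {
        'kp': np.array([kp.pt for kp in kps]),           # x, y locations
        'scale': np.array([kp.size for kp in kps]),      # keypoint scales
        'angle': np.array([kp.angle for kp in kps]),     # orientations (deg)
        'score': np.array([kp.response for kp in kps]),  # detector responses
        'descs': descs,                                  # N x 128 descriptors
    }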
Example #7
def main(cfg):
    '''Main function to evaluate stereo pairs.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    # Get data directory
    data_dir = get_data_path(cfg)

    # Load pre-computed pairs with the new visibility criteria
    pairs_per_th = get_pairs_per_threshold(data_dir)

    # Check if all files exist
    if is_stereo_complete(cfg):
        print(' -- already exists, skipping stereo eval')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))
    matches_dict = load_h5(get_match_file(cfg))
    geom_dict = load_h5(get_geom_file(cfg))
    geom_inl_dict = load_h5(get_geom_inl_file(cfg))

    filter_matches_dict = load_h5(get_filter_match_file(cfg))

    # Load visibility and images
    images_list = get_fullpath_list(data_dir, 'images')
    vis_list = get_fullpath_list(data_dir, 'visibility')
    if cfg.dataset != 'googleurban':
        depth_maps_list = get_fullpath_list(data_dir, 'depth_maps')
    image_names = get_item_name_list(images_list)

    # Load camera information
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list)

    # Generate all possible pairs
    print('Generating list of all possible pairs')
    pairs = compute_image_pairs(vis_list, len(image_names), cfg.vis_th)
    print('Old pairs with the point-based visibility threshold: {} '
          '(for compatibility)'.format(len(pairs)))
    for k, v in pairs_per_th.items():
        print('New pairs at visibility threshold {}: {}'.format(k, len(v)))

    # Evaluate each stereo pair in parallel
    # Compute it for all pairs (i.e. visibility threshold 0)
    print('Compute stereo metrics for all pairs')
    #num_cores = int(multiprocessing.cpu_count() * 0.9)
    num_cores = int(len(os.sched_getaffinity(0)) * 0.9)

    result = Parallel(n_jobs=num_cores)(
        delayed(compute_stereo_metrics_from_E)(
            images_list[image_names.index(pair.split('-')[0])],
            images_list[image_names.index(pair.split('-')[1])],
            depth_maps_list[image_names.index(pair.split('-')[0])]
            if cfg.dataset != 'googleurban' else None,
            depth_maps_list[image_names.index(pair.split('-')[1])]
            if cfg.dataset != 'googleurban' else None,
            np.asarray(keypoints_dict[pair.split('-')[0]]),
            np.asarray(keypoints_dict[pair.split('-')[1]]),
            calib_dict[pair.split('-')[0]], calib_dict[pair.split('-')[1]],
            geom_dict[pair], matches_dict[pair], filter_matches_dict[pair],
            geom_inl_dict[pair], cfg)
        for pair in tqdm(pairs_per_th['0.0']))

    # Convert previous visibility list to strings
    old_keys = []
    for pair in pairs:
        old_keys.append('{}-{}'.format(image_names[pair[0]],
                                       image_names[pair[1]]))

    # Extract scores, err_q, err_t from results
    all_keys = pairs_per_th['0.0']
    err_dict, rep_s_dict = {}, {}
    geo_s_dict_pre_match, geo_s_dict_refined_match, \
        geo_s_dict_final_match = {}, {}, {}
    true_s_dict_pre_match, true_s_dict_refined_match, \
        true_s_dict_final_match = {}, {}, {}
    for i, key in enumerate(all_keys):
        if key in old_keys and result[i][5]:
            geo_s, true_s = result[i][0], result[i][1]
            geo_s_dict_pre_match[key] = geo_s[0] if geo_s else None
            geo_s_dict_refined_match[key] = geo_s[1] if geo_s else None
            geo_s_dict_final_match[key] = geo_s[2] if geo_s else None
            true_s_dict_pre_match[key] = true_s[0] if true_s else None
            true_s_dict_refined_match[key] = true_s[1] if true_s else None
            true_s_dict_final_match[key] = true_s[2] if true_s else None
            rep_s_dict[key] = result[i][4]
            err_dict[key] = [result[i][2], result[i][3]]
    print('Aggregating results for the old visibility constraint: '
          '{}/{}'.format(len(geo_s_dict_pre_match), len(result)))

    # Repeat with the new visibility threshold
    err_dict_th, rep_s_dict_th = {}, {}
    geo_s_dict_pre_match_th, geo_s_dict_refined_match_th, \
        geo_s_dict_final_match_th = {}, {}, {}
    true_s_dict_pre_match_th, true_s_dict_refined_match_th, \
        true_s_dict_final_match_th = {}, {}, {}
    for th, cur_pairs in pairs_per_th.items():
        _err_dict, _rep_s_dict = {}, {}
        _geo_s_dict_pre_match, _geo_s_dict_refined_match, \
            _geo_s_dict_final_match = {}, {}, {}
        _true_s_dict_pre_match, _true_s_dict_refined_match, \
            _true_s_dict_final_match = {}, {}, {}
        for i, key in enumerate(all_keys):
            if len(cur_pairs) > 0 and key in cur_pairs and result[i][5]:
                geo_s, true_s = result[i][0], result[i][1]
                _geo_s_dict_pre_match[key] = geo_s[0] if geo_s else None
                _geo_s_dict_refined_match[key] = geo_s[1] if geo_s else None
                _geo_s_dict_final_match[key] = geo_s[2] if geo_s else None
                _true_s_dict_pre_match[key] = true_s[0] if true_s else None
                _true_s_dict_refined_match[key] = true_s[1] if true_s else None
                _true_s_dict_final_match[key] = true_s[2] if true_s else None
                _rep_s_dict[key] = result[i][4] if result[i][4] else None
                _err_dict[key] = [result[i][2], result[i][3]]
        geo_s_dict_pre_match_th[th] = _geo_s_dict_pre_match
        geo_s_dict_refined_match_th[th] = _geo_s_dict_refined_match
        geo_s_dict_final_match_th[th] = _geo_s_dict_final_match
        true_s_dict_pre_match_th[th] = _true_s_dict_pre_match
        true_s_dict_refined_match_th[th] = _true_s_dict_refined_match
        true_s_dict_final_match_th[th] = _true_s_dict_final_match
        err_dict_th[th] = _err_dict
        rep_s_dict_th[th] = _rep_s_dict
        print('Aggregating results for threshold "{}": {}/{}'.format(
            th, len(geo_s_dict_pre_match_th[th]), len(result)))

    # Create results folder if it does not exist
    if not os.path.exists(get_stereo_path(cfg)):
        os.makedirs(get_stereo_path(cfg))

    # Finally, save packed scores and errors
    if cfg.dataset != 'googleurban':
        save_h5(geo_s_dict_pre_match, get_stereo_epipolar_pre_match_file(cfg))
        save_h5(geo_s_dict_refined_match,
                get_stereo_epipolar_refined_match_file(cfg))
        save_h5(geo_s_dict_final_match,
                get_stereo_epipolar_final_match_file(cfg))

        save_h5(true_s_dict_pre_match,
                get_stereo_depth_projection_pre_match_file(cfg))
        save_h5(true_s_dict_refined_match,
                get_stereo_depth_projection_refined_match_file(cfg))
        save_h5(true_s_dict_final_match,
                get_stereo_depth_projection_final_match_file(cfg))
        save_h5(rep_s_dict, get_repeatability_score_file(cfg))
    save_h5(err_dict, get_stereo_pose_file(cfg))

    for th in pairs_per_th:
        if cfg.dataset != 'googleurban':
            save_h5(geo_s_dict_pre_match_th[th],
                    get_stereo_epipolar_pre_match_file(cfg, th))
            save_h5(geo_s_dict_refined_match_th[th],
                    get_stereo_epipolar_refined_match_file(cfg, th))
            save_h5(geo_s_dict_final_match_th[th],
                    get_stereo_epipolar_final_match_file(cfg, th))
            save_h5(true_s_dict_pre_match_th[th],
                    get_stereo_depth_projection_pre_match_file(cfg, th))
            save_h5(true_s_dict_refined_match_th[th],
                    get_stereo_depth_projection_refined_match_file(cfg, th))
            save_h5(true_s_dict_final_match_th[th],
                    get_stereo_depth_projection_final_match_file(cfg, th))
            save_h5(rep_s_dict_th[th], get_repeatability_score_file(cfg, th))
        save_h5(err_dict_th[th], get_stereo_pose_file(cfg, th))
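
As a quick sanity check, one might reload the packed pose errors afterwards; a sketch assuming the same helpers as above and that failed or skipped pairs hold None or non-finite values:

# Hypothetical post-hoc check: reload the packed pose errors and report
# medians over the pairs that were evaluated successfully.
errs = load_h5(get_stereo_pose_file(cfg))
err_q = np.array([v[0] for v in errs.values()], dtype=float)
err_t = np.array([v[1] for v in errs.values()], dtype=float)
print('median err_q: {:.3f} / median err_t: {:.3f}'.format(
    np.median(err_q[np.isfinite(err_q)]),
    np.median(err_t[np.isfinite(err_t)])))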
Example #8
            keypoints_dict = load_h5(cfg.import_path / seq / 'keypoints.h5')
            matches_dict = load_h5(mpath)
            pairs = list(matches_dict.keys())
            cfg.task = 'stereo'

            for run in range(3):
                print('Run {}'.format(run))
                random.shuffle(pairs)
                calib = {'K': np.eye(3)}
                names = [p.split('-') for p in pairs]

                result = Parallel(n_jobs=num_cores)(
                    delayed(compute_model)(cfg, np.asarray(matches_dict[pair]),
                                           np.asarray(keypoints_dict[n0]),
                                           np.asarray(keypoints_dict[n1]),
                                           calib, calib, None, None)
                    for pair, (n0, n1) in tqdm(zip(pairs, names),
                                               total=len(pairs)))

                inl_dict = {pair: result[i][1] for i, pair in enumerate(pairs)}
                save_h5(inl_dict,
                        export_root / seq / 'matches_stereo_{}.h5'.format(run))

        method['config_phototourism_stereo']['geom'] = {'method': 'cv2-8pt'}
        est = label.split('_')[-1]
        method['config_phototourism_stereo']['custom_matches_name'] += est
        method['config_phototourism_multiview']['custom_matches_name'] += est

        with open(export_root / 'config_{}.json'.format(label), 'w') as f:
            json.dump(method, f, indent=2)
Example #9
    # Extract match mask
    save_match_inlier(sfm_cfg, key_list, mask_dict)


if __name__ == '__main__':
    cfg, unparsed = get_config()

    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print_usage()
        exit(1)

    if not os.path.exists(get_filter_path(cfg)):
        os.makedirs(get_filter_path(cfg))

    cur_key = 'config_{}_{}'.format(cfg.dataset, cfg.task)
    if cur_key not in cfg.method_dict:
        raise ValueError('Cannot find "{}"'.format(cur_key))
    cur_filter = cfg.method_dict[cur_key]['outlier_filter']

    if cur_filter['method'] == 'cne-bp-nd':
        from third_party.cne.config import get_config as get_cne_config_from_cne
        from third_party.cne.network import MyNetwork
        from third_party.cne.geom import get_sampsons
        cne_interface(cfg)
    elif cur_filter['method'] == 'none':
        copyfile(get_match_file(cfg), get_filter_match_file(cfg))
        save_h5({'cost': 0.0}, get_filter_cost_file(cfg))
    else:
        raise ValueError('Unknown prefilter type')
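
The method_dict consulted above comes from the benchmark's JSON configuration. A hypothetical minimal entry, using only key names that appear in these examples (the 'custom_matches_name' value is a placeholder):

# Hypothetical minimal config entry; keys follow the
# 'config_{dataset}_{task}' pattern used above.
method = {
    'config_phototourism_stereo': {
        'outlier_filter': {'method': 'cne-bp-nd'},  # or 'none'
        'geom': {'method': 'cv2-8pt'},
        'custom_matches_name': 'my_matches',  # placeholder name
    },
}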