def is_filter_complete(cfg):
    '''Checks whether the match filtering step is complete.'''

    # Filtering is complete once the filtered match file exists
    is_complete = os.path.exists(get_filter_match_file(cfg))

    return is_complete
Example #2
def save_match_inlier(sfm_cfg, key_list, mask_dict):
    '''Filters the matches down to the CNe inliers and saves them.'''

    match_dict = load_h5(get_match_file(sfm_cfg))

    if len(match_dict) != len(mask_dict):
        raise RuntimeError('Number of pairs from CNe output is different '
                           'from original data!')

    for key, match_mask in mask_dict.items():
        # Keep only the match columns flagged as inliers by the mask
        mask_index = np.where(match_mask)
        match_idx_pairs_inlier = match_dict[key_list[key]][:, mask_index]
        match_dict[key_list[key]] = np.squeeze(match_idx_pairs_inlier)

    save_h5(match_dict, get_filter_match_file(sfm_cfg))
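
For reference, a minimal sketch of the masking idiom above, on hypothetical data. Indexing a 2xN match array with np.where() keeps a singleton axis, which is why the result is squeezed afterwards; plain boolean indexing is equivalent.

import numpy as np

# Hypothetical 2xN match indices (row 0: image 1, row 1: image 2)
matches = np.array([[0, 1, 2, 3],
                    [5, 6, 7, 8]])
mask = np.array([True, False, True, True])

inliers = np.squeeze(matches[:, np.where(mask)])  # shape (2, 1, 3) -> (2, 3)
assert np.array_equal(inliers, matches[:, mask])  # boolean-index equivalent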
Example #3
def compute_num_input_matches(res_dict, deprecated_images, cfg):
    '''Computes the number of input matches given to COLMAP and stores it in res_dict.'''

    # TODO fix this after re-implementing custom matches
    # if cfg.method_dict['config_{}_{}'.format(cfg.dataset,
    #                                          cfg.task)]['use_custom_matches']:
    #     raise NotImplementedError(
    #         'TODO Load the right dict with custom matches')

    # Read match dict
    matches_dict = load_h5_valid_image(
        get_filter_match_file(cfg), deprecated_images)

    # For every bag, compute the number of matches going into colmap
    bag_size_json = load_json(
        getattr(cfg, 'splits_{}_{}'.format(cfg.dataset, cfg.subset)))
    bag_size_list = [b['bag_size'] for b in bag_size_json]
    bag_size_num = [b['num_in_bag'] for b in bag_size_json]

    # Average it per bag size first, then across all bag sizes
    num_input_matches = []
    for bag_size, cur_bag_size_num in zip(bag_size_list, bag_size_num):
        num_input_matches_bagsize = []
        for bag_id in range(cur_bag_size_num):
            cfg_bag = deepcopy(cfg)
            cfg_bag.bag_size = bag_size
            cfg_bag.bag_id = bag_id

            # Skip bags that contain deprecated images
            if not valid_bag(cfg_bag, deprecated_images):
                continue

            images = get_colmap_image_path_list(cfg_bag)
            keys = [os.path.splitext(os.path.basename(im))[0] for im in images]
            pairs = []
            for i in range(len(keys)):
                for j in range(i + 1, len(keys)):
                    pairs.append('-'.join(
                        sorted([keys[i], keys[j]], reverse=True)))
            for pair in pairs:
                num_input_matches_bagsize.append(matches_dict[pair].shape[-1])
        num_input_matches.append(np.mean(num_input_matches_bagsize))

    res_dict['num_input_matches'] = np.mean(num_input_matches)
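
A minimal sketch of the pair-key convention assumed by matches_dict above (hypothetical image names): keys join the two image stems with '-', sorted in reverse lexicographic order.

import os

images = ['/data/bag/0001.jpg', '/data/bag/0002.jpg', '/data/bag/0003.jpg']
keys = [os.path.splitext(os.path.basename(im))[0] for im in images]

pairs = []
for i in range(len(keys)):
    for j in range(i + 1, len(keys)):
        # Reverse lexicographic order, matching the convention above
        pairs.append('-'.join(sorted([keys[i], keys[j]], reverse=True)))

print(pairs)  # ['0002-0001', '0003-0001', '0003-0002']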
Example #4
def run_colmap_for_bag(cfg):
    '''Runs COLMAP to retrieve poses for each bag.'''

    # Colmap pose file already exists, skip the session
    if os.path.exists(get_colmap_pose_file(cfg)):
        print(' -- already exists, skipping COLMAP eval')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))

    matches_dict = load_h5(get_filter_match_file(cfg))

    print('Running COLMAP on "{}", bagsize {} -- bag {}'.format(
        cfg.scene, cfg.bag_size, cfg.bag_id))

    # Additional sanity check to account for crashes -- in this case the
    # colmap temp directory may still exist. This is an indication that we
    # need to remove previous results and rerun colmap.
    colmap_temp_path = get_colmap_temp_path(cfg)
    colmap_output_path = get_colmap_output_path(cfg)
    if os.path.exists(colmap_temp_path):
        print(' -- temp path exists - cleaning up from crash')
        rmtree(colmap_temp_path)
        if os.path.exists(colmap_output_path):
            rmtree(colmap_output_path)
        if os.path.exists(get_colmap_pose_file(cfg)):
            os.remove(get_colmap_pose_file(cfg))

    # Check existence of a previous colmap result and terminate if it exists
    if os.path.exists(colmap_output_path):
        print(' -- already exists, skipping COLMAP session')
        return

    # Create output directory
    os.makedirs(colmap_output_path)

    # Create colmap temporary directory and copy files over. Remove anything
    # that might have existed.
    if os.path.exists(colmap_temp_path):
        rmtree(colmap_temp_path)

    # Make sure old data is gone and create a new temp folder
    assert not os.path.exists(colmap_temp_path)
    os.makedirs(colmap_temp_path)

    # Create colmap-friendly structures
    os.makedirs(os.path.join(colmap_temp_path, 'images'))
    os.makedirs(os.path.join(colmap_temp_path, 'features'))

    # Get list of all images in this bag
    image_subset_list = get_colmap_image_path_list(cfg)

    subset_index = get_colmap_image_subset_index(cfg, image_subset_list)

    # Copy images
    for _src in image_subset_list:
        _dst = os.path.join(colmap_temp_path, 'images', os.path.basename(_src))
        copyfile(_src, _dst)

    # Write features to colmap friendly format
    for image_path in image_subset_list:
        # Retrieve image name, with and without extension
        image_name = os.path.basename(image_path)
        image_name_no_ext = os.path.splitext(image_name)[0]
        # Read keypoint
        keypoints = keypoints_dict[image_name_no_ext]
        # Keypoint file to write to
        kp_file = os.path.join(colmap_temp_path, 'features',
                               image_name + '.txt')
        # Open a file to write
        with open(kp_file, 'w') as f:
            # Retrieve the number of keypoints
            len_keypoints = len(keypoints)
            # Header: number of keypoints and descriptor dimensionality (128)
            f.write(str(len_keypoints) + ' ' + str(128) + '\n')
            for i in range(len_keypoints):
                # Keypoint as x, y, scale, orientation
                kp = ' '.join(str(k) for k in keypoints[i][:4])
                # Dummy all-zero descriptor, since matches are imported separately
                desc = ' '.join(str(0) for d in range(128))
                f.write(kp + ' ' + desc + '\n')
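
    # For reference, a written feature file looks like this (values
    # illustrative): a 'NUM_KEYPOINTS 128' header line, then one keypoint
    # per line:
    #
    #   2 128
    #   10.0 20.0 1.0 0.0 0 0 ... 0
    #   30.5 40.5 1.2 0.3 0 0 ... 0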

    # Write matches to colmap friendly format
    # Read visibilities
    data_dir = get_data_path(cfg)
    vis_list = get_fullpath_list(data_dir, 'visibility')

    # Load matches and store them to a text file
    # TODO: This seems to be done multiple times. Do we need to do this?
    print('Generate list of all possible pairs')
    pairs = compute_image_pairs(vis_list, len(image_subset_list), cfg.vis_th,
                                subset_index)
    print('{} pairs generated'.format(len(pairs)))

    # Write to match file
    match_file = os.path.join(colmap_temp_path, 'matches.txt')
    with open(match_file, 'w') as f:
        for pair in pairs:
            image_1_name = os.path.basename(image_subset_list[pair[0]])
            image_2_name = os.path.basename(image_subset_list[pair[1]])
            image_1_name_no_ext = os.path.splitext(image_1_name)[0]
            image_2_name_no_ext = os.path.splitext(image_2_name)[0]

            # Load matches
            key = '-'.join([image_1_name_no_ext, image_2_name_no_ext])
            matches = np.squeeze(matches_dict[key])
            # only write when matches are given
            if matches.ndim == 2:
                f.write(image_1_name + ' ' + image_2_name + '\n')
                for _i in range(matches.shape[1]):
                    f.write(
                        str(matches[0, _i]) + ' ' + str(matches[1, _i]) + '\n')
                f.write('\n')
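    # For reference, the raw match file consumed by 'colmap matches_importer'
    # contains one block per image pair: the two image names, one line per
    # match with the two keypoint indices, and a blank separator line
    # (indices illustrative):
    #
    #   img_a.jpg img_b.jpg
    #   0 12
    #   3 7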

    # COLMAP runs -- wrapped in try except to throw errors if subprocess fails
    # and then clean up the colmap temp directory
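    # The run has three stages: feature_importer -> matches_importer -> mapper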

    try:
        print('COLMAP Feature Import')
        cmd = ['colmap', 'feature_importer']
        cmd += [
            '--database_path',
            os.path.join(colmap_output_path, 'databases.db')
        ]
        cmd += ['--image_path', os.path.join(colmap_temp_path, 'images')]
        cmd += ['--import_path', os.path.join(colmap_temp_path, 'features')]
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to import features!')

        print('COLMAP Match Import')
        cmd = ['colmap', 'matches_importer']
        cmd += [
            '--database_path',
            os.path.join(colmap_output_path, 'databases.db')
        ]
        cmd += [
            '--match_list_path',
            os.path.join(colmap_temp_path, 'matches.txt')
        ]
        cmd += ['--match_type', 'raw']
        cmd += ['--SiftMatching.use_gpu', '0']
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to import matches!')

        print('COLMAP Mapper')
        cmd = ['colmap', 'mapper']
        cmd += ['--image_path', os.path.join(colmap_temp_path, 'images')]
        cmd += [
            '--database_path',
            os.path.join(colmap_output_path, 'databases.db')
        ]
        cmd += ['--output_path', colmap_output_path]
        cmd += ['--Mapper.min_model_size', str(cfg.colmap_min_model_size)]
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to run mapper!')

        # Delete temp directory after working
        rmtree(colmap_temp_path)

    except Exception as err:
        # Remove colmap output path and temp path
        rmtree(colmap_temp_path)
        rmtree(colmap_output_path)

        # Re-throw error
        print(err)
        raise RuntimeError('Part of the COLMAP run returned a failed state!')

    print('Checking validity of the colmap run just in case')

    # Check the validity of every colmap reconstruction; at least one model
    # must be valid for the run to count
    is_any_colmap_valid = False
    idx_list = [
        os.path.join(colmap_output_path, _d)
        for _d in os.listdir(colmap_output_path)
        if os.path.isdir(os.path.join(colmap_output_path, _d))
    ]
    for idx in idx_list:
        colmap_img_file = os.path.join(idx, 'images.bin')
        if is_colmap_img_valid(colmap_img_file):
            is_any_colmap_valid = True
            break
    if not is_any_colmap_valid:
        print('Error in reading colmap output -- '
              'removing colmap output directory')
        rmtree(colmap_output_path)
Example #5
def main(cfg):
    '''Main function to compute matches.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    # Get data directory
    data_dir = get_data_path(cfg)

    # Load pre-computed pairs with the new visibility criteria
    pairs_per_th = get_pairs_per_threshold(data_dir)
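    # (pairs_per_th maps each visibility threshold, given as a string such as
    # '0.0', to a list of 'imageA-imageB' pair keys)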

    # Check if all files exist
    if is_stereo_complete(cfg):
        print(' -- already exists, skipping stereo eval')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))
    matches_dict = load_h5(get_match_file(cfg))
    geom_dict = load_h5(get_geom_file(cfg))
    geom_inl_dict = load_h5(get_geom_inl_file(cfg))

    filter_matches_dict = load_h5(get_filter_match_file(cfg))

    # Load visibility and images
    images_list = get_fullpath_list(data_dir, 'images')
    vis_list = get_fullpath_list(data_dir, 'visibility')
    if cfg.dataset != 'googleurban':
        depth_maps_list = get_fullpath_list(data_dir, 'depth_maps')
    image_names = get_item_name_list(images_list)

    # Load camera information
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list)

    # Generate all possible pairs
    print('Generating list of all possible pairs')
    pairs = compute_image_pairs(vis_list, len(image_names), cfg.vis_th)
    print('Old pairs with the point-based visibility threshold: {} '
          '(for compatibility)'.format(len(pairs)))
    for k, v in pairs_per_th.items():
        print('New pairs at visibility threshold {}: {}'.format(k, len(v)))

    # Evaluate each stereo pair in parallel
    # Compute it for all pairs (i.e. visibility threshold 0)
    print('Compute stereo metrics for all pairs')
    # Use 90% of the CPUs available to this process; os.sched_getaffinity
    # respects CPU affinity masks, unlike multiprocessing.cpu_count()
    num_cores = int(len(os.sched_getaffinity(0)) * 0.9)

    result = Parallel(n_jobs=num_cores)(
        delayed(compute_stereo_metrics_from_E)(
            images_list[image_names.index(pair.split('-')[0])],
            images_list[image_names.index(pair.split('-')[1])],
            depth_maps_list[image_names.index(pair.split('-')[0])]
            if cfg.dataset != 'googleurban' else None,
            depth_maps_list[image_names.index(pair.split('-')[1])]
            if cfg.dataset != 'googleurban' else None,
            np.asarray(keypoints_dict[pair.split('-')[0]]),
            np.asarray(keypoints_dict[pair.split('-')[1]]),
            calib_dict[pair.split('-')[0]],
            calib_dict[pair.split('-')[1]],
            geom_dict[pair],
            matches_dict[pair],
            filter_matches_dict[pair],
            geom_inl_dict[pair],
            cfg) for pair in tqdm(pairs_per_th['0.0']))

    # Convert previous visibility list to strings
    old_keys = []
    for pair in pairs:
        old_keys.append('{}-{}'.format(image_names[pair[0]],
                                       image_names[pair[1]]))

    # Extract scores, err_q, err_t from results
    all_keys = pairs_per_th['0.0']
    err_dict, rep_s_dict = {}, {}
    geo_s_dict_pre_match, geo_s_dict_refined_match, \
        geo_s_dict_final_match = {}, {}, {}
    true_s_dict_pre_match, true_s_dict_refined_match, \
        true_s_dict_final_match = {}, {}, {}
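    # Each result entry is a tuple: [0] geometric scores (pre-match, refined,
    # final), [1] depth-projection scores (pre-match, refined, final),
    # [2] err_q, [3] err_t, [4] repeatability score, [5] success flag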
    for i in range(len(result)):
        if all_keys[i] in old_keys:
            if result[i][5]:
                geo_s_dict_pre_match[all_keys[i]] = (
                    result[i][0][0] if result[i][0] else None)
                geo_s_dict_refined_match[all_keys[i]] = (
                    result[i][0][1] if result[i][0] else None)
                geo_s_dict_final_match[all_keys[i]] = (
                    result[i][0][2] if result[i][0] else None)
                true_s_dict_pre_match[all_keys[i]] = (
                    result[i][1][0] if result[i][1] else None)
                true_s_dict_refined_match[all_keys[i]] = (
                    result[i][1][1] if result[i][1] else None)
                true_s_dict_final_match[all_keys[i]] = (
                    result[i][1][2] if result[i][1] else None)
                err_q = result[i][2]
                err_t = result[i][3]
                rep_s_dict[all_keys[i]] = result[i][4]
                err_dict[all_keys[i]] = [err_q, err_t]
    print('Aggregating results for the old visibility constraint: '
          '{}/{}'.format(len(geo_s_dict_pre_match), len(result)))

    # Repeat with the new visibility threshold
    err_dict_th, rep_s_dict_th = {}, {}
    geo_s_dict_pre_match_th, geo_s_dict_refined_match_th, \
        geo_s_dict_final_match_th = {}, {}, {}
    true_s_dict_pre_match_th, true_s_dict_refined_match_th, \
        true_s_dict_final_match_th = {}, {}, {}
    for th, cur_pairs in pairs_per_th.items():
        _err_dict, _rep_s_dict = {}, {}
        _geo_s_dict_pre_match, _geo_s_dict_refined_match, \
            _geo_s_dict_final_match = {}, {}, {}
        _true_s_dict_pre_match, _true_s_dict_refined_match, \
            _true_s_dict_final_match = {}, {}, {}
        for i in range(len(all_keys)):
            if len(cur_pairs) > 0 and all_keys[i] in cur_pairs:
                if result[i][5]:
                    _geo_s_dict_pre_match[all_keys[i]] = (
                        result[i][0][0] if result[i][0] else None)
                    _geo_s_dict_refined_match[all_keys[i]] = (
                        result[i][0][1] if result[i][0] else None)
                    _geo_s_dict_final_match[all_keys[i]] = (
                        result[i][0][2] if result[i][0] else None)
                    _true_s_dict_pre_match[all_keys[i]] = (
                        result[i][1][0] if result[i][1] else None)
                    _true_s_dict_refined_match[all_keys[i]] = (
                        result[i][1][1] if result[i][1] else None)
                    _true_s_dict_final_match[all_keys[i]] = (
                        result[i][1][2] if result[i][1] else None)
                    err_q = result[i][2]
                    err_t = result[i][3]
                    _rep_s_dict[all_keys[i]] = (
                        result[i][4] if result[i][4] else None)
                    _err_dict[all_keys[i]] = [err_q, err_t]
        geo_s_dict_pre_match_th[th] = _geo_s_dict_pre_match
        geo_s_dict_refined_match_th[th] = _geo_s_dict_refined_match
        geo_s_dict_final_match_th[th] = _geo_s_dict_final_match
        true_s_dict_pre_match_th[th] = _true_s_dict_pre_match
        true_s_dict_refined_match_th[th] = _true_s_dict_refined_match
        true_s_dict_final_match_th[th] = _true_s_dict_final_match
        err_dict_th[th] = _err_dict
        rep_s_dict_th[th] = _rep_s_dict
        print('Aggregating results for threshold "{}": {}/{}'.format(
            th, len(geo_s_dict_pre_match_th[th]), len(result)))

    # Create results folder if it does not exist
    if not os.path.exists(get_stereo_path(cfg)):
        os.makedirs(get_stereo_path(cfg))

    # Finally, save packed scores and errors
    if cfg.dataset != 'googleurban':
        save_h5(geo_s_dict_pre_match, get_stereo_epipolar_pre_match_file(cfg))
        save_h5(geo_s_dict_refined_match,
                get_stereo_epipolar_refined_match_file(cfg))
        save_h5(geo_s_dict_final_match,
                get_stereo_epipolar_final_match_file(cfg))

        save_h5(true_s_dict_pre_match,
                get_stereo_depth_projection_pre_match_file(cfg))
        save_h5(true_s_dict_refined_match,
                get_stereo_depth_projection_refined_match_file(cfg))
        save_h5(true_s_dict_final_match,
                get_stereo_depth_projection_final_match_file(cfg))
        save_h5(rep_s_dict, get_repeatability_score_file(cfg))
    save_h5(err_dict, get_stereo_pose_file(cfg))

    for th in pairs_per_th:
        if cfg.dataset != 'googleurban':
            save_h5(geo_s_dict_pre_match_th[th],
                    get_stereo_epipolar_pre_match_file(cfg, th))
            save_h5(geo_s_dict_refined_match_th[th],
                    get_stereo_epipolar_refined_match_file(cfg, th))
            save_h5(geo_s_dict_final_match_th[th],
                    get_stereo_epipolar_final_match_file(cfg, th))
            save_h5(true_s_dict_pre_match_th[th],
                    get_stereo_depth_projection_pre_match_file(cfg, th))
            save_h5(true_s_dict_refined_match_th[th],
                    get_stereo_depth_projection_refined_match_file(cfg, th))
            save_h5(true_s_dict_final_match_th[th],
                    get_stereo_depth_projection_final_match_file(cfg, th))
            save_h5(rep_s_dict_th[th], get_repeatability_score_file(cfg, th))
        save_h5(err_dict_th[th], get_stereo_pose_file(cfg, th))
Example #6
    # Save the matches filtered down to the inlier mask
    save_match_inlier(sfm_cfg, key_list, mask_dict)


if __name__ == '__main__':
    cfg, unparsed = get_config()

    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print_usage()
        exit(1)

    if not os.path.exists(get_filter_path(cfg)):
        os.makedirs(get_filter_path(cfg))

    cur_key = 'config_{}_{}'.format(cfg.dataset, cfg.task)
    if cur_key not in cfg.method_dict:
        raise ValueError('Cannot find "{}"'.format(cur_key))
    cur_filter = cfg.method_dict[cur_key]['outlier_filter']

    if cur_filter['method'] == 'cne-bp-nd':
        from third_party.cne.config import get_config as get_cne_config_from_cne
        from third_party.cne.network import MyNetwork
        from third_party.cne.geom import get_sampsons
        cne_interface(cfg)
    elif cur_filter['method'] == 'none':
        copyfile(get_match_file(cfg), get_filter_match_file(cfg))
        save_h5({'cost': 0.0}, get_filter_cost_file(cfg))
    else:
        raise ValueError('Unknown outlier filter method')
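
For reference, a minimal sketch of the method_dict shape this dispatcher expects; the dataset/task names below are hypothetical and only the key structure is inferred from the code above.

method_dict = {
    'config_phototourism_stereo': {   # 'config_{dataset}_{task}' (hypothetical)
        'outlier_filter': {
            'method': 'cne-bp-nd',    # or 'none' to copy matches through unfiltered
        },
    },
}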