def get_num_in_bag(cfg):
    '''Retrieve the number of bags for the requested bag size.

    Looks up ``cfg.bag_size`` in the splits json for the current
    dataset/subset and returns the corresponding ``num_in_bag`` value.

    Raises
    ------
    ValueError
        (via ``list.index``) if ``cfg.bag_size`` is not one of the
        configured bag sizes.
    '''

    # The scenes json was previously loaded here as well, but its
    # contents were never used, so that read has been dropped.
    bag_size_json = load_json(
        getattr(cfg, 'splits_{}_{}'.format(cfg.dataset, cfg.subset)))
    bag_size_list = [b['bag_size'] for b in bag_size_json]
    bag_size_num = [b['num_in_bag'] for b in bag_size_json]

    return bag_size_num[bag_size_list.index(cfg.bag_size)]
def validate_json(json_path, datasets, logger):
    '''Validate a submission json file, logging any problems found.

    Checks that the file exists, parses as json, contains a list of
    method configs, and that each method passes ``validate_method``.
    Problems are reported through ``logger.add_new_log``; the function
    never raises for an invalid submission.
    '''
    # Check that the json file exists at all.
    if not os.path.isfile(json_path):
        logger.add_new_log('Submission does not contain json file')
        return
    # Load the json; report parse errors instead of raising.
    # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
    # and SystemExit.)
    try:
        method_list = load_json(json_path)
    except Exception:
        logger.add_new_log(
            'Following error occurs when loading json : \n   {}'.format(
                sys.exc_info()))
        return

    # The top-level structure must be a list of method configs.
    if not isinstance(method_list, list):
        logger.add_new_log(
            'Json should contain a list of method, please refer to the example json file.'
        )
        return

    # Validate each method; log failures but keep going so all problems
    # are reported in a single pass.
    for i, method in enumerate(method_list):
        print('Validating method {}/{}: "{}"'.format(
            i + 1, len(method_list), method['config_common']['json_label']))
        try:
            validate_method(method, is_challenge=True, datasets=datasets)
        except Exception:
            logger.add_new_log(
                'Following error occurs when validating json : \n   {}'.format(
                    sys.exc_info()))
# Example #3
def compute_num_input_matches(res_dict, deprecated_images, cfg):
    '''Save the number of input matches given to Colmap.'''

    # TODO fix this after re-implementing custom matches
    # if cfg.method_dict['config_{}_{}'.format(cfg.dataset,
    #                                          cfg.task)]['use_custom_matches']:
    #     raise NotImplementedError(
    #         'TODO Load the right dict with custom matches')

    # Matches that survived filtering, keyed by image-pair label.
    matches_dict = load_h5_valid_image(get_filter_match_file(cfg),
                                       deprecated_images)

    # Bag sizes and number of bags per size for this dataset/subset.
    split_spec = load_json(
        getattr(cfg, 'splits_{}_{}'.format(cfg.dataset, cfg.subset)))
    sizes = [entry['bag_size'] for entry in split_spec]
    counts = [entry['num_in_bag'] for entry in split_spec]

    # Average per bag size first, then across all bag sizes.
    per_size_means = []
    for size, count in zip(sizes, counts):
        match_counts = []
        for bag_id in range(count):
            cfg_bag = deepcopy(cfg)
            cfg_bag.bag_size = size
            cfg_bag.bag_id = bag_id

            # Skip bags that contain deprecated images.
            if not valid_bag(cfg_bag, deprecated_images):
                continue

            image_paths = get_colmap_image_path_list(cfg_bag)
            keys = [
                os.path.splitext(os.path.basename(path))[0]
                for path in image_paths
            ]
            # Pair labels are the two keys sorted descending, dash-joined.
            labels = [
                '-'.join(sorted([keys[a], keys[b]], reverse=True))
                for a in range(len(keys)) for b in range(a + 1, len(keys))
            ]
            match_counts += [
                matches_dict[label].shape[-1] for label in labels
            ]
        per_size_means.append(np.mean(match_counts))

    res_dict['num_input_matches'] = np.mean(per_size_means)
# Example #4
def main(cfg):
    '''Visualization of stereo keypoints and matches.

    Draws each selected image pair side by side, plots the RANSAC
    inlier matches colored by their reprojection/epipolar error, and
    saves high- and low-quality renderings to the stereo viz folders.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    # Files should not be named to prevent (easy) abuse
    # Instead we use 0, ..., cfg.num_viz_stereo_pairs
    viz_folder_hq, viz_folder_lq = get_stereo_viz_folder(cfg)

    print(' -- Visualizations, stereo: "{}/{}"'.format(cfg.dataset, cfg.scene))
    t_start = time()

    # Load deprecated images list
    deprecated_images_all = load_json(cfg.json_deprecated_images)
    if cfg.dataset in deprecated_images_all and cfg.scene in deprecated_images_all[
            cfg.dataset]:
        deprecated_images = deprecated_images_all[cfg.dataset][cfg.scene]
    else:
        deprecated_images = []

    # Load keypoints, matches and errors
    keypoints_dict = load_h5_valid_image(get_kp_file(cfg), deprecated_images)
    matches_dict = load_h5_valid_image(get_match_file(cfg), deprecated_images)
    ransac_inl_dict = load_h5_valid_image(get_geom_inl_file(cfg),
                                          deprecated_images)

    # Hacky: We need to recompute the errors, loading only for the keys
    data_dir = get_data_path(cfg)
    pairs_all = get_pairs_per_threshold(data_dir)['0.1']
    # Drop pairs that touch any deprecated image.
    pairs = []
    for pair in pairs_all:
        if all([key not in deprecated_images for key in pair.split('-')]):
            pairs += [pair]

    # Create results folder if it does not exist
    if not os.path.exists(viz_folder_hq):
        os.makedirs(viz_folder_hq)
    if not os.path.exists(viz_folder_lq):
        os.makedirs(viz_folder_lq)

    # Sort alphabetically and pick different images
    # (greedy: never reuse an image already shown in a previous pair).
    sorted_keys = sorted(pairs)
    picked = []
    pairs = []
    for pair in sorted_keys:
        fn1, fn2 = pair.split('-')
        if fn1 not in picked and fn2 not in picked:
            picked += [fn1, fn2]
            pairs += [pair]
        if len(pairs) == cfg.num_viz_stereo_pairs:
            break

    # Load depth maps
    # NOTE(review): depth loading is skipped only for 'googleurban', but
    # the depth-based branch below runs only for 'phototourism' -- depth
    # loaded for other datasets (e.g. pragueparks) appears unused; confirm.
    depth = {}
    if cfg.dataset != 'googleurban':
        for pair in pairs:
            files = pair.split('-')
            for f in files:
                if f not in depth:
                    depth[f] = load_depth(
                        os.path.join(data_dir, 'depth_maps',
                                     '{}.h5'.format(f)))

    # Generate and save the images
    for i, pair in enumerate(pairs):
        # load metadata
        fn1, fn2 = pair.split('-')
        calib_dict = load_calib([
            os.path.join(data_dir, 'calibration',
                         'calibration_{}.h5'.format(fn1)),
            os.path.join(data_dir, 'calibration',
                         'calibration_{}.h5'.format(fn2))
        ])
        calc1 = calib_dict[fn1]
        calc2 = calib_dict[fn2]
        inl = ransac_inl_dict[pair]

        # Get depth for keypoints
        kp1 = keypoints_dict[fn1]
        kp2 = keypoints_dict[fn2]
        # Normalize keypoints
        kp1n = normalize_keypoints(kp1, calc1['K'])
        kp2n = normalize_keypoints(kp2, calc2['K'])

        # Get {R, t} from calibration information
        R_1, t_1 = calc1['R'], calc1['T'].reshape((3, 1))
        R_2, t_2 = calc2['R'], calc2['T'].reshape((3, 1))

        # Compute dR, dt (relative pose between the two cameras)
        dR = np.dot(R_2, R_1.T)
        dT = t_2 - np.dot(dR, t_1)

        if cfg.dataset == 'phototourism':
            # Look up per-keypoint depth at the rounded pixel location,
            # clipping indices to the depth-map bounds first.
            kp1_int = np.round(kp1).astype(int)
            kp2_int = np.round(kp2).astype(int)

            kp1_int[:, 1] = np.clip(kp1_int[:, 1], 0, depth[fn1].shape[0] - 1)
            kp1_int[:, 0] = np.clip(kp1_int[:, 0], 0, depth[fn1].shape[1] - 1)
            kp2_int[:, 1] = np.clip(kp2_int[:, 1], 0, depth[fn2].shape[0] - 1)
            kp2_int[:, 0] = np.clip(kp2_int[:, 0], 0, depth[fn2].shape[1] - 1)
            d1 = np.expand_dims(depth[fn1][kp1_int[:, 1], kp1_int[:, 0]],
                                axis=-1)
            d2 = np.expand_dims(depth[fn2][kp2_int[:, 1], kp2_int[:, 0]],
                                axis=-1)

            # Project with depth
            kp1n_p, kp2n_p = get_projected_kp(kp1n, kp2n, d1, d2, dR, dT)
            kp1_p = unnormalize_keypoints(kp1n_p, calc2['K'])
            kp2_p = unnormalize_keypoints(kp2n_p, calc1['K'])

            # Re-index keypoints from matches
            kp1_inl = kp1[inl[0]]
            kp2_inl = kp2[inl[1]]
            kp1_p_inl = kp1_p[inl[0]]
            kp2_p_inl = kp2_p[inl[1]]
            kp1n_inl = kp1n[inl[0]]
            kp2n_inl = kp2n[inl[1]]
            kp1n_p_inl = kp1n_p[inl[0]]
            kp2n_p_inl = kp2n_p[inl[1]]
            d1_inl = d1[inl[0]]
            d2_inl = d2[inl[1]]

            # Filter out keypoints with invalid depth (depth == 0)
            nonzero_index = np.nonzero(np.squeeze(d1_inl * d2_inl))
            zero_index = np.where(np.squeeze(d1_inl * d2_inl) == 0)[0]
            kp1_inl_nonzero = kp1_inl[nonzero_index]
            kp2_inl_nonzero = kp2_inl[nonzero_index]
            kp1_p_inl_nonzero = kp1_p_inl[nonzero_index]
            kp2_p_inl_nonzero = kp2_p_inl[nonzero_index]
            kp1n_inl_nonzero = kp1n_inl[nonzero_index]
            kp2n_inl_nonzero = kp2n_inl[nonzero_index]
            kp1n_p_inl_nonzero = kp1n_p_inl[nonzero_index]
            kp2n_p_inl_nonzero = kp2n_p_inl[nonzero_index]
            # Compute symmetric distance using the depth image
            d = get_truesym(kp1_inl_nonzero, kp2_inl_nonzero,
                            kp1_p_inl_nonzero, kp2_p_inl_nonzero)
        else:
            # All points are valid for computing the epipolar distance.
            zero_index = []

            # Compute symmetric epipolar distance for every match.
            kp1_inl_nonzero = kp1[inl[0]]
            kp2_inl_nonzero = kp2[inl[1]]
            kp1n_inl_nonzero = kp1n[inl[0]]
            kp2n_inl_nonzero = kp2n[inl[1]]
            # d = np.zeros(inl.shape[1])
            d = get_episym(kp1n_inl_nonzero, kp2n_inl_nonzero, dR, dT)

        # canvas: both images side by side (or stacked, per config)
        im, v_offset, h_offset = build_composite_image(
            os.path.join(
                data_dir, 'images',
                fn1 + ('.png' if cfg.dataset == 'googleurban' else '.jpg')),
            os.path.join(
                data_dir, 'images',
                fn2 + ('.png' if cfg.dataset == 'googleurban' else '.jpg')),
            margin=5,
            axis=1 if
            (not cfg.viz_composite_vert or cfg.dataset == 'googleurban'
             or cfg.dataset == 'pragueparks') else 0)

        plt.figure(figsize=(10, 10))
        plt.imshow(im)
        linewidth = 2

        # Plot matches on points without depth, in blue
        for idx in range(len(zero_index)):
            plt.plot(
                (kp1_inl[idx, 0] + h_offset[0], kp2_inl[idx, 0] + h_offset[1]),
                (kp1_inl[idx, 1] + v_offset[0], kp2_inl[idx, 1] + v_offset[1]),
                color='b',
                linewidth=linewidth)

        # Plot matches
        # Points are normalized by the focals, which are on average ~670.

        max_dist = 5
        if cfg.dataset == 'googleurban':
            max_dist = 2e-4
        if cfg.dataset == 'pragueparks':
            max_dist = 2e-4
        cmap = matplotlib.cm.get_cmap('summer')
        # Shuffle draw order so overlapping matches do not always hide
        # the same ones.
        order = list(range(len(d)))
        random.shuffle(order)
        for idx in order:
            if d[idx] <= max_dist:
                min_val = 0
                max_val = 255 - min_val
                col = cmap(
                    int(max_val * (1 - (max_dist - d[idx]) / max_dist) +
                        min_val))
                # col = cmap(255 * (max_dist - d[idx]) / max_dist)
            else:
                # Matches above the distance threshold are drawn in red.
                col = 'r'
            plt.plot((kp1_inl_nonzero[idx, 0] + h_offset[0],
                      kp2_inl_nonzero[idx, 0] + h_offset[1]),
                     (kp1_inl_nonzero[idx, 1] + v_offset[0],
                      kp2_inl_nonzero[idx, 1] + v_offset[1]),
                     color=col,
                     linewidth=linewidth)

        plt.tight_layout()
        plt.axis('off')
        viz_file_hq = os.path.join(viz_folder_hq, '{:05d}.png'.format(i))
        viz_file_lq = os.path.join(viz_folder_lq, '{:05d}.jpg'.format(i))
        plt.savefig(viz_file_hq, bbox_inches='tight')

        # Convert with imagemagick (downscaled, lower-quality jpg copy)
        os.system('convert -quality 75 -resize \"500>\" {} {}'.format(
            viz_file_hq, viz_file_lq))

        plt.close()

    print('Done [{:.02f} s.]'.format(time() - t_start))
def main(cfg):
    '''Pack benchmark results into a single json file.

    Gathers per-dataset stereo and multiview metrics (averaged over
    runs, bags and scenes), attaches submission metadata, writes the
    packed dictionary under ``cfg.path_pack`` and prints a short
    summary.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    # Back up config: ``cfg`` is rebound to per-dataset/per-task copies
    # below, so keep a pristine original around.
    cfg_orig = deepcopy(cfg)
    method = cfg_orig.method_dict

    # Add config options to the dict
    master_dict = OrderedDict()
    master_dict['config'] = method

    # Add date
    master_dict['properties'] = OrderedDict()
    master_dict['properties'][
        'processing_date'] = pack_helper.get_current_date()
    print('Adding processing date: {}'.format(
        master_dict['properties']['processing_date']))

    # Add submission flag
    master_dict['properties']['is_submission'] = cfg.is_submission
    print('Flagging as user submission: {}'.format(cfg.is_submission))

    # Add descriptor properties, probed on one fixed scene. If the
    # descriptor file cannot be read, fall back to placeholder values.
    cfg_desc = deepcopy(cfg_orig)
    cfg_desc.dataset = 'phototourism'
    cfg_desc.scene = 'british_museum'
    try:
        descriptors_dict = load_h5(get_desc_file(cfg_desc))
        desc_type, desc_size, desc_nbytes = pack_helper.get_descriptor_properties(
            cfg_desc, descriptors_dict)
    except Exception:
        desc_type = 'none'
        desc_size = 0
        desc_nbytes = 0
    master_dict['properties']['descriptor_type'] = desc_type
    master_dict['properties']['descriptor_size'] = desc_size
    master_dict['properties']['descriptor_nbytes'] = desc_nbytes
    print('Adding descriptor properties: {} {} ({} bytes)'.format(
        master_dict['properties']['descriptor_size'],
        master_dict['properties']['descriptor_type'],
        master_dict['properties']['descriptor_nbytes']))

    # Deprecated images for the current dataset/scene, if any.
    deprecated_images_all = load_json(cfg.json_deprecated_images)
    if cfg.dataset in deprecated_images_all and cfg.scene in deprecated_images_all[
            cfg.dataset]:
        deprecated_images = deprecated_images_all[cfg.dataset][cfg.scene]
    else:
        deprecated_images = []

    # Read data and splits
    DATASET_LIST = ['phototourism', 'pragueparks', 'googleurban']
    for dataset in DATASET_LIST:
        # Skip if not in config
        if 'config_{}_stereo'.format(
                dataset) not in method and 'config_{}_multiview'.format(
                    dataset) not in method:
            continue

        # Create empty dictionary
        master_dict[dataset] = OrderedDict()
        res_dict = OrderedDict()
        master_dict[dataset]['results'] = res_dict

        # Save number of runs
        master_dict[dataset]['num_runs_stereo'] = getattr(
            cfg_orig, 'num_runs_{}_stereo'.format(cfg_orig.subset))
        master_dict[dataset]['num_runs_multiview'] = getattr(
            cfg_orig, 'num_runs_{}_multiview'.format(cfg_orig.subset))

        # Load data config. (The unused ``num_in_bag`` list previously
        # built here has been removed.)
        scene_list = load_json(
            getattr(cfg_orig, 'scenes_{}_{}'.format(dataset, cfg_orig.subset)))
        bag_size_json = load_json(
            getattr(cfg_orig, 'splits_{}_{}'.format(dataset, cfg_orig.subset)))
        bag_size_list = [b['bag_size'] for b in bag_size_json]
        bag_size_str = ['{}bag'.format(b) for b in bag_size_list]

        # Create empty dicts
        for scene in ['allseq'] + scene_list:
            res_dict[scene] = OrderedDict()
            for task in ['stereo', 'multiview']:
                res_dict[scene][task] = OrderedDict()
                res_dict[scene][task]['run_avg'] = OrderedDict()
                if task == 'multiview':
                    for bag in bag_size_str + ['bag_avg']:
                        res_dict[scene]['multiview']['run_avg'][
                            bag] = OrderedDict()

        # Stereo -- multiple runs
        t = time()
        cur_key = 'config_{}_stereo'.format(dataset)
        if cfg_orig.eval_stereo and cur_key in method and method[cur_key]:
            num_runs = getattr(cfg_orig,
                               'num_runs_{}_stereo'.format(cfg_orig.subset))
            cfg = deepcopy(cfg_orig)
            cfg.dataset = dataset
            cfg.task = 'stereo'
            for scene in scene_list:
                cfg.scene = scene

                res_dict[scene]['stereo']['run_avg'] = OrderedDict()
                for run in range(num_runs):
                    res_dict[scene]['stereo']['run_{}'.format(
                        run)] = OrderedDict()

                # Create list of things to gather
                metric_list = []
                metric_list += ['avg_num_keypoints']
                # metric_list += ['matching_scores_epipolar']
                metric_list += ['num_inliers']
                if dataset != 'googleurban':
                    metric_list += ['matching_scores_depth_projection']
                    metric_list += ['repeatability']
                metric_list += ['qt_auc']
                metric_list += ['timings']

                for run in range(num_runs):
                    # Compute and pack results
                    cfg.run = run
                    cur_dict = res_dict[scene]['stereo']['run_{}'.format(run)]
                    for metric in metric_list:
                        t_cur = time()
                        getattr(pack_helper,
                                'compute_' + metric)(cur_dict,
                                                     deprecated_images, cfg)
                        print(
                            ' -- Packing "{}"/"{}"/stereo, run: {}/{}, metric: {} [{:.02f} s]'
                            .format(dataset, scene, run + 1, num_runs, metric,
                                    time() - t_cur))

            # Compute average across runs, for stereo
            t_cur = time()
            pack_helper.average_stereo_over_runs(cfg, res_dict, num_runs)
            print(
                ' -- Packing "{}"/stereo: averaging over {} run(s) [{:.02f} s]'
                .format(dataset, num_runs,
                        time() - t_cur))

            # Compute average across scenes, for stereo
            t_cur = time()
            pack_helper.average_stereo_over_scenes(cfg, res_dict, num_runs)
            print(
                ' -- Packing "{}"/stereo: averaging over {} scene(s) [{:.02f} s]'
                .format(dataset, len(scene_list),
                        time() - t_cur))

            print(' -- Finished packing stereo in {:.01f} sec.'.format(time() -
                                                                       t))
        else:
            print('Skipping "{}/stereo"'.format(dataset))

        # Multiview -- multiple runs
        t = time()
        cur_key = 'config_{}_multiview'.format(dataset)
        if cfg_orig.eval_multiview and cur_key in method and method[cur_key]:
            # Read num_runs from cfg_orig (not cfg): at this point cfg may
            # still be the copy mutated by the stereo section above. This
            # also matches how the stereo branch reads it.
            num_runs = getattr(cfg_orig,
                               'num_runs_{}_multiview'.format(cfg_orig.subset))
            cfg = deepcopy(cfg_orig)
            cfg.dataset = dataset
            cfg.task = 'multiview'
            for scene in scene_list:
                cfg.scene = scene

                for run in ['run_avg'
                            ] + ['run_{}'.format(f) for f in range(num_runs)]:
                    res_dict[scene]['multiview'][run] = OrderedDict()
                    for bags_label in ['bag_avg'] + bag_size_str:
                        res_dict[scene]['multiview'][run][
                            bags_label] = OrderedDict()

                # Create list of things to gather
                metric_list = []
                metric_list += ['avg_num_keypoints']
                metric_list += ['num_input_matches']
                metric_list += ['qt_auc_colmap']
                metric_list += ['ATE']
                metric_list += ['colmap_stats']

                for run in range(num_runs):
                    for bag_size in bag_size_list:
                        # Compute and pack results
                        cfg.run = run
                        cfg.bag_size = bag_size
                        cur_dict = res_dict[scene]['multiview']
                        for metric in metric_list:
                            t_cur = time()
                            getattr(pack_helper, 'compute_' + metric)(
                                cur_dict['run_{}'.format(run)]['{}bag'.format(
                                    bag_size)], deprecated_images, cfg)
                            print(
                                ' -- Packing "{}"/"{}"/multiview, run {}/{}, "{}", metric: {} [{:.02f} s]'
                                .format(dataset, scene, run + 1, num_runs,
                                        '{}bag'.format(bag_size), metric,
                                        time() - t_cur))

                        # Compute average across bags
                        # NOTE(review): this loop calls
                        # average_multiview_over_bags once per metric with
                        # identical arguments -- looks redundant (a single
                        # call may suffice); left as-is pending confirmation.
                        any_key = random.choice([
                            key for key in cur_dict['run_{}'.format(run)]
                            if ('bag' in key and key != 'bag_avg')
                        ])
                        for metric in cur_dict['run_{}'.format(run)][any_key]:
                            pack_helper.average_multiview_over_bags(
                                cfg, cur_dict['run_{}'.format(run)],
                                bag_size_list)

            # Compute average across runs, for multiview
            t_cur = time()
            pack_helper.average_multiview_over_runs(cfg, res_dict, num_runs,
                                                    bag_size_str + ['bag_avg'])
            print(
                ' -- Packing "{}"/multiview: averaging over {} run(s) [{:.02f} s]'
                .format(dataset, num_runs,
                        time() - t_cur))

            # Compute average across scenes, for multiview
            t_cur = time()
            pack_helper.average_multiview_over_scenes(
                cfg, res_dict, num_runs, ['bag_avg'] + bag_size_str)
            print(
                ' -- Packing "{}"/multiview: averaging over {} scene(s) [{:.02f} s]'
                .format(dataset, len(scene_list),
                        time() - t_cur))

            print(' -- Finished packing multiview in {:.01f} sec.'.format(
                time() - t))
        else:
            print('Skipping "{}/multiview"'.format(dataset))

    # Add a unique identifier (equivalent to "submission id" in previous
    # versions).
    if cfg.is_challenge:
        master_dict['uuid'] = get_uuid(cfg)

    # Dump packed result
    if not os.path.exists(cfg.path_pack):
        os.makedirs(cfg.path_pack)
    json_dump_file = os.path.join(
        cfg.path_pack,
        '{}.json'.format(cfg.method_dict['config_common']['json_label']))

    print(' -- Saving to: "{}"'.format(json_dump_file))
    with open(json_dump_file, 'w') as outfile:
        json.dump(master_dict, outfile, indent=2)

    # Add a short results summary.
    print()
    print('-- SUMMARY --')
    print('Subset: "{}"'.format(cfg.subset))
    for dataset in DATASET_LIST:
        print()
        print('Dataset "{}"'.format(dataset))
        if dataset in master_dict:
            # Stereo
            if 'stereo' in master_dict[dataset]['results'][
                    'allseq'] and cfg.eval_stereo:
                print('-- Stereo mAA(10 deg): {:.05f}'.format(
                    master_dict[dataset]['results']['allseq']['stereo']
                    ['run_avg']['qt_auc_10_th_0.1']['mean']))
                for scene in master_dict[dataset]['results']:
                    if scene != 'allseq':
                        print('---- Scene "{}" -> Stereo mAA(10 deg): {:.05f}'.
                              format(
                                  scene, master_dict[dataset]['results'][scene]
                                  ['stereo']['run_avg']['qt_auc_10_th_0.1']
                                  ['mean']))
            # Multiview
            if 'multiview' in master_dict[dataset]['results'][
                    'allseq'] and cfg.eval_multiview:
                print('-- Multiview mAA(10 deg): {:.05f}'.format(
                    master_dict[dataset]['results']['allseq']['multiview']
                    ['run_avg']['bag_avg']['qt_auc_colmap_10']['mean']))
                for scene in master_dict[dataset]['results']:
                    if scene != 'allseq':
                        print(
                            '---- Scene "{}" -> Multiview mAA(10 deg): {:.05f}'
                            .format(
                                scene, master_dict[dataset]['results'][scene]
                                ['multiview']['run_avg']['bag_avg']
                                ['qt_auc_colmap_10']['mean']))
# Example #6
def main(cfg):
    ''' Main routine for the benchmark.

    Registers dataset split paths on the config, validates every method
    in the method-list json, then schedules the evaluation job pipeline
    (features -> matches -> filter -> stereo/multiview) for each
    method/dataset/scene/task combination, and finally the packing job.
    '''

    # Read data and splits
    # Register the scene-list and bag-size json paths on the config for
    # every dataset/subset combination used below.
    for dataset in ['phototourism']:
        for subset in ['val', 'test']:
            setattr(cfg, 'scenes_{}_{}'.format(dataset, subset),
                    './json/data/{}_{}.json'.format(dataset, subset))
            setattr(cfg, 'splits_{}_{}'.format(dataset, subset),
                    './json/bag_size/{}_{}.json'.format(dataset, subset))

    # Read the list of methods and datasets
    method_list = load_json(cfg.json_method)
    for i, method in enumerate(method_list):
        print('Validating method {}/{}: "{}"'.format(
            i + 1, len(method_list), method['config_common']['json_label']))
        validate_method(method, is_challenge=cfg.is_challenge)

    # Back up original config; ``cfg`` is rebound per task below.
    cfg_orig = deepcopy(cfg)
    # Shared across all methods/tasks so common stages are reused.
    job_dict = {}

    # Loop over methods, datasets/scenes, and tasks
    for method in method_list:
        # accumulate packing dependencies over datasets and runs
        all_stereo_jobs = []
        all_multiview_jobs = []
        all_relocalization_jobs = []

        for dataset in ['phototourism']:
            # Load data config
            scene_list = load_json(
                getattr(cfg_orig,
                        'scenes_{}_{}'.format(dataset, cfg_orig.subset)))
            bag_size_json = load_json(
                getattr(cfg_orig,
                        'splits_{}_{}'.format(dataset, cfg_orig.subset)))
            bag_size_list = [b['bag_size'] for b in bag_size_json]
            bag_size_num = [b['num_in_bag'] for b in bag_size_json]

            for scene in scene_list:
                print('Working on {}: {}/{}'.format(
                    method['config_common']['json_label'], dataset, scene))

                # For each task
                for task in ['stereo', 'multiview', 'relocalization']:
                    # Skip if the key does not exist or it is empty
                    cur_key = 'config_{}_{}'.format(dataset, task)
                    if cur_key not in method or not method[cur_key]:
                        print(
                            'Empty config for "{}", skipping!'.format(cur_key))
                        continue

                    # Append method to config
                    cfg = deepcopy(cfg_orig)
                    cfg.method_dict = deepcopy(method)
                    cfg.dataset = dataset
                    cfg.task = task
                    cfg.scene = scene

                    # Features
                    feature_jobs = create_eval_jobs([], 'feature', cfg,
                                                    job_dict)

                    # Matches (depend on feature jobs)
                    match_jobs = create_eval_jobs(feature_jobs, 'match', cfg,
                                                  job_dict)

                    # Filter (depends on match jobs)
                    match_inlier_jobs = create_eval_jobs(
                        match_jobs, 'filter', cfg, job_dict)

                    # Empty dependencies
                    stereo_jobs = []
                    multiview_jobs = []
                    # Not populated: the relocalization task raises
                    # NotImplementedError below.
                    relocalization_jobs = []

                    num_runs = getattr(
                        cfg, 'num_runs_{}_{}'.format(cfg.subset, task))
                    for run in range(num_runs):
                        cfg.run = run

                        # Pose estimation and stereo evaluation
                        if task == 'stereo' and cfg.eval_stereo:
                            geom_model_jobs = create_eval_jobs(
                                match_inlier_jobs, 'model', cfg, job_dict)
                            stereo_jobs += create_eval_jobs(
                                geom_model_jobs, 'stereo', cfg, job_dict)
                            all_stereo_jobs += stereo_jobs

                        # Visualization for stereo
                        if task == 'stereo' and cfg.run_viz:
                            eval_viz_stereo(stereo_jobs, cfg)

                        # Multiview
                        if task == 'multiview' and cfg.eval_multiview:
                            multiview_jobs += eval_multiview(
                                match_inlier_jobs, cfg, bag_size_list,
                                bag_size_num)
                            all_multiview_jobs += multiview_jobs

                        # Visualization for colmap
                        if task == 'multiview' and cfg.run_viz:
                            eval_viz_colmap(multiview_jobs, cfg)

                        if task == 'relocalization' and cfg.eval_relocalization:
                            raise NotImplementedError(
                                'TODO relocalization task')

        # Packing -- can be skipped with --skip_packing=True
        # For instance, when only generating visualizations
        if not cfg.skip_packing:
            cfg = deepcopy(cfg_orig)
            cfg.method_dict = deepcopy(method)
            eval_packing(
                all_stereo_jobs + all_multiview_jobs + all_relocalization_jobs,
                cfg)
# Example #7
def main(cfg):
    '''Visualization of colmap points.

    For every configured bag size and bag id, draws each image with its
    keypoints overlaid (blue = keypoint registered to a 3D point in the
    best COLMAP model, red = unregistered) and writes the reconstructed
    3D points out as an ASCII ``.pcd`` point cloud. High-quality PNGs and
    low-quality JPGs go to the folders given by ``get_colmap_viz_folder``.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.

    '''

    # Bag sizes for this dataset/subset come from the splits json.
    # NOTE(review): bag_size_num is computed but never used below.
    bag_size_json = load_json(
        getattr(cfg, 'splits_{}_{}'.format(cfg.dataset, cfg.subset)))
    bag_size_list = [b['bag_size'] for b in bag_size_json]
    bag_size_num = [b['num_in_bag'] for b in bag_size_json]

    # # Do not re-run if files already exist -- off for now
    # skip = True
    # for _bag_size in bag_size_list:
    #     cfg_bag = deepcopy(cfg)
    #     cfg_bag.bag_size = _bag_size
    #     viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
    #     for _bag_id in range(
    #             getattr(cfg_bag,
    #                     'num_viz_colmap_subsets_bagsize{}'.format(_bag_size))):
    #         if any([
    #                 not os.path.exists(
    #                     os.path.join(
    #                         viz_folder_lq,
    #                         'colmap-bagsize{:d}-bag{:02d}-image{:02d}.jpg'.
    #                         format(_bag_size, _bag_id, i)))
    #                 for i in range(_bag_size)
    #         ]):
    #             skip = False
    #             break
    #         if not os.path.exists(
    #                 os.path.join(
    #                     viz_folder_lq,
    #                     'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
    #                         _bag_size, _bag_id))):
    #             skip = False
    #             break
    # if skip:
    #     print(' -- already exists, skipping colmap visualization')
    #     return

    print(' -- Visualizations, multiview: "{}/{}"'.format(
        cfg.dataset, cfg.scene))
    t_start = time()

    # Create results folder if it does not exist
    for _bag_size in bag_size_list:
        cfg_bag = deepcopy(cfg)
        cfg_bag.bag_size = _bag_size
        viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
        if not os.path.exists(viz_folder_hq):
            os.makedirs(viz_folder_hq)
        if not os.path.exists(viz_folder_lq):
            os.makedirs(viz_folder_lq)

    # Load keypoints (dict: image key -> array of image coordinates)
    keypoints_dict = load_h5(get_kp_file(cfg))

    # Loop over bag sizes
    for _bag_size in bag_size_list:
        cfg_bag = deepcopy(cfg)
        cfg_bag.bag_size = _bag_size
        num_bags = getattr(
            cfg_bag, 'num_viz_colmap_subsets_bagsize{}'.format(_bag_size))
        for _bag_id in range(num_bags):
            print(
                ' -- Visualizations, multiview: "{}/{}", bag_size={}, bag {}/{}'
                .format(cfg.dataset, cfg.scene, _bag_size, _bag_id + 1,
                        num_bags))

            # Retrieve list of images
            cfg_bag.bag_id = _bag_id
            images_in_bag = get_colmap_image_path_list(cfg_bag)

            # Retrieve reconstruction. best_index == -1 means no valid
            # model exists for this bag; keypoints are then all drawn red.
            colmap_output_path = get_colmap_output_path(cfg_bag)
            # is_colmap_valid = os.path.exists(
            #     os.path.join(colmap_output_path, '0'))
            best_index = get_best_colmap_index(cfg_bag)
            if best_index != -1:
                colmap_images = read_images_binary(
                    os.path.join(colmap_output_path, str(best_index),
                                 'images.bin'))
            for i, image_path in enumerate(images_in_bag):
                # Limit to 10 or so, even for bag size 25
                if i >= cfg.max_num_images_viz_multiview:
                    break

                # Load image and keypoints
                im, _ = load_image(image_path,
                                   use_color_image=True,
                                   crop_center=False,
                                   force_rgb=True)
                used = None
                key = os.path.splitext(os.path.basename(image_path))[0]
                if best_index != -1:
                    # Locate this image in the model by basename substring;
                    # assumes basenames are unique within the bag -- TODO confirm
                    for j in colmap_images:
                        if key in colmap_images[j].name:
                            # Mask of keypoints registered to a 3D point
                            used = colmap_images[j].point3D_ids != -1
                            break
                if used is None:
                    # Image (or model) missing: mark every keypoint unused
                    used = [False] * keypoints_dict[key].shape[0]
                used = np.array(used)

                fig = plt.figure(figsize=(20, 20))
                plt.imshow(im)
                # Unregistered keypoints in red...
                plt.plot(keypoints_dict[key][~used, 0],
                         keypoints_dict[key][~used, 1],
                         'r.',
                         markersize=12)
                # ...registered keypoints in blue
                plt.plot(keypoints_dict[key][used, 0],
                         keypoints_dict[key][used, 1],
                         'b.',
                         markersize=12)
                plt.tight_layout()
                plt.axis('off')

                # TODO Ideally we would save to pdf
                # but it does not work on 16.04, so we do png instead
                # https://bugs.launchpad.net/ubuntu/+source/imagemagick/+bug/1796563
                viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
                viz_file_hq = os.path.join(
                    viz_folder_hq,
                    'bagsize{:d}-bag{:02d}-image{:02d}.png'.format(
                        _bag_size, _bag_id, i))
                viz_file_lq = os.path.join(
                    viz_folder_lq,
                    'bagsize{:d}-bag{:02d}-image{:02d}.jpg'.format(
                        _bag_size, _bag_id, i))
                plt.savefig(viz_file_hq, bbox_inches='tight')

                # Convert with imagemagick ("400>" shrinks only images wider
                # than 400px). NOTE(review): paths go through a shell -- fine
                # for these generated names, unsafe for arbitrary ones.
                os.system('convert -quality 75 -resize \"400>\" {} {}'.format(
                    viz_file_hq, viz_file_lq))

                plt.close()

            # Dump the model's 3D points as an ASCII PCD (v.7) file,
            # median-centered and scaled to roughly [-1, 1].
            if best_index != -1:
                colmap_points = read_points3d_binary(
                    os.path.join(colmap_output_path, str(best_index),
                                 'points3D.bin'))
                points3d = []
                for k in colmap_points:
                    points3d.append([
                        colmap_points[k].xyz[0], colmap_points[k].xyz[1],
                        colmap_points[k].xyz[2]
                    ])
                points3d = np.array(points3d)
                points3d -= np.median(points3d, axis=0)[None, ...]
                points3d /= np.abs(points3d).max() + 1e-6
                pcd = os.path.join(
                    get_colmap_viz_folder(cfg_bag)[0],
                    'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                        _bag_size, _bag_id))
                with open(pcd, 'w') as f:
                    f.write('# .PCD v.7 - Point Cloud Data file format\n')
                    f.write('VERSION .7\n')
                    f.write('FIELDS x y z\n')
                    f.write('SIZE 4 4 4\n')
                    f.write('TYPE F F F\n')
                    f.write('COUNT 1 1 1\n')
                    f.write('WIDTH {}\n'.format(len(colmap_points)))
                    f.write('HEIGHT 1\n')
                    f.write('VIEWPOINT 0 0 0 1 0 0 0\n')
                    f.write('POINTS {}\n'.format(len(colmap_points)))
                    f.write('DATA ascii\n')
                    for p in points3d:
                        f.write('{:.05f} {:.05f} {:.05f}\n'.format(
                            p[0], p[1], p[2]))
                # Mirror the point cloud from the HQ to the LQ folder
                copyfile(
                    os.path.join(
                        get_colmap_viz_folder(cfg_bag)[0],
                        'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                            _bag_size, _bag_id)),
                    os.path.join(
                        get_colmap_viz_folder(cfg_bag)[1],
                        'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                            _bag_size, _bag_id)))

    print('done [{:.02f} s.]'.format(time() - t_start))
def validate_submission_files(sub_path, benchmark_repo_path, datasets,
                              raw_data_path, logger):
    '''Validate the folder layout and h5 contents of a challenge submission.

    Every problem found is reported through ``logger.add_new_log`` instead
    of raising, so a single pass collects all issues. For each
    dataset/sequence this checks that the expected folders exist and that
    the keypoints, descriptors, and match files are present, keyed by the
    correct image (pair) names, and correctly shaped.

    Parameters
    ----------
    sub_path: str
        Root folder of the unpacked submission.
    benchmark_repo_path: str
        Path to the benchmark repo, providing json/data/<dataset>_test.json.
    datasets: list of str
        Dataset names expected in the submission.
    raw_data_path: str
        Root folder with the raw images, used to derive the image keys.
    logger: object
        Collects validation errors via ``add_new_log(message)``.
    '''

    for dataset in datasets:
        raw_dataset_path = os.path.join(raw_data_path, dataset)

        # Check if the dataset folder exists
        sub_dataset_path = os.path.join(sub_path, dataset)
        if not os.path.isdir(sub_dataset_path):
            logger.add_new_log(
                'Submission does not contain {} dataset.'.format(dataset))
            continue

        # Read the sequence list for this dataset from the benchmark json
        seqs = load_json(
            os.path.join(benchmark_repo_path,
                         'json/data/{}_test.json'.format(dataset)))
        for seq in seqs:
            # Image keys are the raw image basenames (without extension)
            raw_seq_path = os.path.join(raw_dataset_path, seq)
            im_list = [
                os.path.splitext(f)[0] for f in os.listdir(raw_seq_path)
                if (os.path.isfile(os.path.join(raw_seq_path, f))
                    and f.endswith(('png', 'jpg')))
            ]

            # All unordered image pairs, keyed as '<a>-<b>' with a > b
            key_pairs = [
                pair[0] + '-' + pair[1]
                for pair in product(im_list, im_list) if pair[0] > pair[1]
            ]

            # Check if the sequence folder exists
            sub_seq_path = os.path.join(sub_dataset_path, seq)
            if not os.path.isdir(sub_seq_path):
                logger.add_new_log(
                    'Submission does not contain {} sequence in {} dataset.'.
                    format(seq, dataset))
                continue

            # Validate keypoints file
            kp_path = os.path.join(sub_seq_path, 'keypoints.h5')
            if not os.path.isfile(kp_path):
                logger.add_new_log(
                    'Submission does not contain keypoints file for {} sequence in {} dataset.'
                    .format(seq, dataset))
            else:
                keypoints = load_h5(kp_path)

                if sorted(keypoints.keys()) != sorted(im_list):
                    logger.add_new_log(
                        '{}-{}: Keypoints file does not contain all the image keys.'
                        .format(dataset, seq))
                if not keypoints:
                    # Guard: an empty file would crash the shape checks below
                    logger.add_new_log(
                        '{}-{}: Keypoints file is empty.'.format(
                            dataset, seq))
                else:
                    # Keypoints must be stored as an (n, 2) array per image
                    sample_kp = next(iter(keypoints.values()))
                    if sample_kp.ndim != 2 or sample_kp.shape[-1] != 2:
                        logger.add_new_log(
                            '{}-{}: Keypoints file is in wrong format.'.format(
                                dataset, seq))
                    # Check the keypoint budget
                    if sample_kp.shape[0] > 8000:
                        logger.add_new_log(
                            '{}-{}: Keypoints file contains more than 8000 points.'
                            .format(dataset, seq))

            # Collect custom match files first (needed by the descriptor
            # checks below)
            match_files = [
                file for file in os.listdir(sub_seq_path)
                if os.path.isfile(os.path.join(sub_seq_path, file))
                and file.startswith('match')
            ]

            # Validate descriptor file
            desc_path = os.path.join(sub_seq_path, 'descriptors.h5')

            # Must provide either a descriptor file or match files
            if not os.path.isfile(desc_path) and len(match_files) == 0:
                logger.add_new_log(
                    'Submission does not contain descriptors file for {} sequence in {} dataset.'
                    .format(seq, dataset))
            elif not os.path.isfile(desc_path):
                # Custom matches provided instead of descriptors: nothing to do
                pass
            else:
                descriptors = load_h5(desc_path)

                if sorted(descriptors.keys()) != sorted(im_list):
                    logger.add_new_log(
                        '{}-{}: Descriptors file does not contain all the image keys.'
                        .format(dataset, seq))
                if not descriptors:
                    logger.add_new_log(
                        '{}-{}: Descriptors file is empty.'.format(
                            dataset, seq))
                else:
                    # Descriptors must be (n, dim) with 64 <= dim <= 2048
                    sample_desc = next(iter(descriptors.values()))
                    if sample_desc.ndim != 2 or not (
                            64 <= sample_desc.shape[-1] <= 2048):
                        logger.add_new_log(
                            '{}-{}: Descriptors file is in wrong format'.
                            format(dataset, seq))

                    # Check the per-keypoint descriptor size
                    desc_type, desc_size, desc_nbytes = get_descriptor_properties(
                        {}, descriptors)
                    if desc_nbytes > 512 and len(match_files) == 0:
                        logger.add_new_log(
                            '{}-{}: Descriptors size is larger than 512 bytes, you need to provide custom match file'
                            .format(dataset, seq))

            # Validate match file names
            if 'matches.h5' in match_files:
                # A single matches.h5 covers both tasks; nothing else allowed
                if len(match_files) != 1:
                    logger.add_new_log(
                        '{}-{}: matches.h5 exists. Do not need to provide any other match files.'
                        .format(dataset, seq))
            elif 'matches_multiview.h5' in match_files or 'matches_stereo_0.h5' in match_files or 'matches_stereo.h5' in match_files:
                # Per-task files: both multiview and stereo must be present
                if 'matches_multiview.h5' not in match_files:
                    logger.add_new_log(
                        '{}-{}: missing matches_multiview.h5'.format(
                            dataset, seq))
                if 'matches_stereo_0.h5' not in match_files and 'matches_stereo.h5' not in match_files:
                    logger.add_new_log(
                        '{}-{}: missing matches_stereo.h5'.format(
                            dataset, seq))
                if 'matches_stereo_1.h5' in match_files or 'matches_stereo_2.h5' in match_files:
                    logger.add_new_log(
                        '{}-{}: for 2021 challenge, we only run stereo once, no need to provide matches_stereo_1 and matches_stereo_2'
                        .format(dataset, seq))

            # Validate the contents of every match file
            for match_file in match_files:
                matches = load_h5(os.path.join(sub_seq_path, match_file))
                if len(matches) != len(key_pairs):
                    logger.add_new_log(
                        '{}-{}: Matches file contains wrong number of keys, should have {} keys, have {}.'
                        .format(dataset, seq, len(key_pairs), len(matches)))
                elif sorted(matches.keys()) != sorted(key_pairs):
                    logger.add_new_log(
                        '{}-{}: Matches file contains wrong keys, maybe the image names are in reverse order. Please refer to submission instruction for proper custom match key naming convention'
                        .format(dataset, seq))
                if not matches:
                    # Nothing more to check in an empty file
                    continue
                # Matches must be stored as a (2, n) array of keypoint indices
                sample_matches = next(iter(matches.values()))
                if sample_matches.ndim != 2 or sample_matches.shape[0] != 2:
                    logger.add_new_log(
                        '{}-{}: Matches file is in wrong format.'.format(
                            dataset, seq))
예제 #9
0
def main(cfg):
    '''Main function. Takes config as input.

    Collects the per-run, per-scene evaluation results for the method in
    ``cfg.method_dict`` (stereo and multiview tasks; relocalization is
    TODO), averages them over runs, bags, and scenes via ``pack_helper``,
    and dumps everything into ``<cfg.path_pack>/<json_label>.json``.
    '''

    # Back up config
    cfg_orig = deepcopy(cfg)
    method = cfg_orig.method_dict

    # Add config options to the dict
    master_dict = OrderedDict()
    master_dict['config'] = method

    # Add date
    master_dict['properties'] = OrderedDict()
    master_dict['properties'][
        'processing_date'] = pack_helper.get_current_date()
    print('Adding processing date: {}'.format(
        master_dict['properties']['processing_date']))

    # Add descriptor properties, probed on a fixed dataset/scene; fall back
    # to zeros when no descriptor file exists (e.g. custom-match methods)
    cfg_desc = deepcopy(cfg_orig)
    cfg_desc.dataset = 'phototourism'
    cfg_desc.scene = 'british_museum'
    try:
        descriptors_dict = load_h5(get_desc_file(cfg_desc))
        desc_type, desc_size, desc_nbytes = pack_helper.get_descriptor_properties(
            cfg_desc, descriptors_dict)
    except Exception:
        desc_type = 'none'
        desc_size = 0
        desc_nbytes = 0
    master_dict['properties']['descriptor_type'] = desc_type
    master_dict['properties']['descriptor_size'] = desc_size
    master_dict['properties']['descriptor_nbytes'] = desc_nbytes
    print('Adding descriptor properties: {} {} ({} bytes)'.format(
        master_dict['properties']['descriptor_size'],
        master_dict['properties']['descriptor_type'],
        master_dict['properties']['descriptor_nbytes']))

    # Read data and splits
    for dataset in ['phototourism']:
        setattr(cfg_orig, 'scenes_{}_{}'.format(dataset, cfg_orig.subset),
                './json/data/{}_{}.json'.format(dataset, cfg_orig.subset))
        setattr(cfg_orig, 'splits_{}_{}'.format(dataset, cfg_orig.subset),
                './json/bag_size/{}_{}.json'.format(dataset, cfg_orig.subset))

        # Create empty dictionary
        master_dict[dataset] = OrderedDict()
        res_dict = OrderedDict()
        master_dict[dataset]['results'] = res_dict

        # Save number of runs
        master_dict[dataset]['num_runs_stereo'] = getattr(
            cfg_orig, 'num_runs_{}_stereo'.format(cfg_orig.subset))
        master_dict[dataset]['num_runs_multiview'] = getattr(
            cfg_orig, 'num_runs_{}_multiview'.format(cfg_orig.subset))

        # Load data config
        scene_list = load_json(
            getattr(cfg_orig, 'scenes_{}_{}'.format(dataset, cfg_orig.subset)))
        bag_size_json = load_json(
            getattr(cfg_orig, 'splits_{}_{}'.format(dataset, cfg_orig.subset)))
        bag_size_list = [b['bag_size'] for b in bag_size_json]
        bag_size_num = [b['num_in_bag'] for b in bag_size_json]
        bag_size_str = ['{}bag'.format(b) for b in bag_size_list]

        # Create empty dicts
        for scene in ['allseq'] + scene_list:
            res_dict[scene] = OrderedDict()
            for task in ['stereo', 'multiview', 'relocalization']:
                res_dict[scene][task] = OrderedDict()
                res_dict[scene][task]['run_avg'] = OrderedDict()
                if task == 'multiview':
                    for bag in bag_size_str + ['bag_avg']:
                        res_dict[scene]['multiview']['run_avg'][
                            bag] = OrderedDict()

        # Stereo -- multiple runs
        t = time()
        cur_key = 'config_{}_stereo'.format(dataset)
        if cfg_orig.eval_stereo and cur_key in method and method[cur_key]:
            num_runs = getattr(cfg_orig,
                               'num_runs_{}_stereo'.format(cfg_orig.subset))
            cfg = deepcopy(cfg_orig)
            cfg.dataset = dataset
            cfg.task = 'stereo'
            for scene in scene_list:
                cfg.scene = scene

                res_dict[scene]['stereo']['run_avg'] = OrderedDict()
                for run in range(num_runs):
                    res_dict[scene]['stereo']['run_{}'.format(
                        run)] = OrderedDict()

                # Create list of metrics to gather; each has a matching
                # pack_helper.compute_<metric> hook
                metric_list = []
                metric_list += ['avg_num_keypoints']
                # metric_list += ['matching_scores_epipolar']
                metric_list += ['num_inliers']
                metric_list += ['matching_scores_depth_projection']
                metric_list += ['repeatability']
                metric_list += ['qt_auc']
                metric_list += ['timings']

                for run in range(num_runs):
                    # Compute and pack results
                    cfg.run = run
                    cur_dict = res_dict[scene]['stereo']['run_{}'.format(run)]
                    for metric in metric_list:
                        t_cur = time()
                        getattr(pack_helper, 'compute_' + metric)(cur_dict,
                                                                  cfg)
                        print(
                            ' -- Packing "{}"/"{}"/stereo, run: {}/{}, metric: {} [{:.02f} s]'
                            .format(dataset, scene, run + 1, num_runs, metric,
                                    time() - t_cur))

            # Compute average across runs, for stereo
            t_cur = time()
            pack_helper.average_stereo_over_runs(cfg, res_dict, num_runs)
            print(
                ' -- Packing "{}"/stereo: averaging over {} run(s) [{:.02f} s]'
                .format(dataset, num_runs,
                        time() - t_cur))

            # Compute average across scenes, for stereo
            t_cur = time()
            pack_helper.average_stereo_over_scenes(cfg, res_dict, num_runs)
            print(
                ' -- Packing "{}"/stereo: averaging over {} scene(s) [{:.02f} s]'
                .format(dataset, len(scene_list),
                        time() - t_cur))

            print(' -- Finished packing stereo in {:.01f} sec.'.format(time() -
                                                                       t))
        else:
            print('Skipping "{}/stereo"'.format(dataset))

        # Multiview -- multiple runs
        t = time()
        cur_key = 'config_{}_multiview'.format(dataset)
        if cfg_orig.eval_multiview and cur_key in method and method[cur_key]:
            # BUGFIX: read num_runs from cfg_orig, matching the stereo branch.
            # The original read it from the stale `cfg` left over from the
            # stereo section, *before* the deepcopy below.
            num_runs = getattr(cfg_orig,
                               'num_runs_{}_multiview'.format(cfg_orig.subset))
            cfg = deepcopy(cfg_orig)
            cfg.dataset = dataset
            cfg.task = 'multiview'
            for scene in scene_list:
                cfg.scene = scene
                for run in ['run_avg'
                            ] + ['run_{}'.format(f) for f in range(num_runs)]:
                    res_dict[scene]['multiview'][run] = OrderedDict()
                    for bags_label in ['bag_avg'] + bag_size_str:
                        res_dict[scene]['multiview'][run][
                            bags_label] = OrderedDict()

                # Create list of metrics to gather; each has a matching
                # pack_helper.compute_<metric> hook
                metric_list = []
                metric_list += ['avg_num_keypoints']
                metric_list += ['num_input_matches']
                metric_list += ['qt_auc_colmap']
                metric_list += ['ATE']
                metric_list += ['colmap_stats']

                for run in range(num_runs):
                    cfg.run = run
                    cur_dict = res_dict[scene]['multiview']
                    for bag_size in bag_size_list:
                        # Compute and pack results
                        cfg.bag_size = bag_size
                        for metric in metric_list:
                            t_cur = time()
                            getattr(pack_helper, 'compute_' + metric)(
                                cur_dict['run_{}'.format(run)]['{}bag'.format(
                                    bag_size)], cfg)
                            print(
                                ' -- Packing "{}"/"{}"/multiview, run {}/{}, "{}", metric: {} [{:.02f} s]'
                                .format(dataset, scene, run + 1, num_runs,
                                        '{}bag'.format(bag_size), metric,
                                        time() - t_cur))

                    # Compute average across bags, once per run after all bag
                    # sizes are packed. (The original redundantly recomputed
                    # this for every bag size and every metric, iterating a
                    # hardcoded '25bag' entry.)
                    pack_helper.average_multiview_over_bags(
                        cfg, cur_dict['run_{}'.format(run)], bag_size_list)

            # Compute average across runs, for multiview
            t_cur = time()
            pack_helper.average_multiview_over_runs(cfg, res_dict, num_runs,
                                                    bag_size_str + ['bag_avg'])
            print(
                ' -- Packing "{}"/multiview: averaging over {} run(s) [{:.02f} s]'
                .format(dataset, num_runs,
                        time() - t_cur))

            # Compute average across scenes, for multiview
            t_cur = time()
            pack_helper.average_multiview_over_scenes(
                cfg, res_dict, num_runs, ['bag_avg'] + bag_size_str)
            print(
                ' -- Packing "{}"/multiview: averaging over {} scene(s) [{:.02f} s]'
                .format(dataset, len(scene_list),
                        time() - t_cur))

            print(' -- Finished packing multiview in {:.01f} sec.'.format(
                time() - t))

            # Relocalization -- multiple runs
            # TODO
        else:
            print('Skipping "{}/multiview"'.format(dataset))

    # Dump packed result
    print(' -- Saving to: "{}"'.format(
        cfg.method_dict['config_common']['json_label']))
    if not os.path.exists(cfg.path_pack):
        os.makedirs(cfg.path_pack)
    json_dump_file = os.path.join(
        cfg.path_pack,
        '{}.json'.format(cfg.method_dict['config_common']['json_label']))

    with open(json_dump_file, 'w') as outfile:
        json.dump(master_dict, outfile, indent=2)
예제 #10
0
def reformat_json(path_json):
    '''Rewrite the JSON file at ``path_json`` in place, pretty-printed
    with two-space indentation.'''

    contents = load_json(path_json)
    with open(path_json, 'w') as handle:
        json.dump(contents, handle, indent=2)
예제 #11
0
    if cfg.path_json == '':
        if cfg.is_challenge:
            raise RuntimeError('Must provide json file for challenge submission')
        if not cfg.kp_name:
            raise RuntimeError('Must define kp_name')
        if not cfg.desc_name:
            raise RuntimeError('Must define desc_name')
        if cfg.match_name and cfg.num_keypoints != -1:
            raise RuntimeError('Can not crop keypoints list with a custom matcher')

        cfg.kp_name = validate_label(cfg.kp_name)
        cfg.desc_name = validate_label(cfg.desc_name)
        cfg.match_name = validate_label(cfg.match_name)
    else:
        # read keypoints, descriptor, and match name from json 
        method_list = load_json(cfg.path_json)
        if len(method_list)!=1:
            raise RuntimeError('Multiple method found in json file. Only support json fils with single method')
        cfg.method_dict = method_list[0]
        cfg.kp_name = cfg.method_dict['config_common']['keypoint']
        cfg.desc_name = cfg.method_dict['config_common']['descriptor']
        if cfg.method_dict['config_phototourism_stereo']['use_custom_matches']:
            cfg.match_name = {}
            for data_task in itertools.product(cfg.datasets, cfg.tasks):
                key = data_task[0]+'_'+data_task[1]
                cfg.match_name[key] = cfg.method_dict['config_'+key]['custom_matches_name']

            
     
    if cfg.is_challenge:
        # compute hash for h5 files
예제 #12
0
import numpy as np
from copy import deepcopy

from utils.io_helper import load_json, load_h5, save_h5
from compute_model import compute_model

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--json_method', type=str, required=True)
    parser.add_argument('--import_path', type=Path, required=True)
    cfg = parser.parse_args()
    cfg.subset = 'test'
    cfg.dataset = 'phototourism'
    cfg.num_opencv_threads = 0

    method_list = load_json(cfg.json_method)
    scene_list = load_json('json/data/phototourism_{}.json'.format(cfg.subset))

    num_cores = cfg.num_opencv_threads if cfg.num_opencv_threads > 0 else int(
        len(os.sched_getaffinity(0)) * 0.9)

    for method in method_list:
        label = method['config_common']['json_label']
        export_root = Path('../submission', label)
        export_root.mkdir(parents=True)
        cfg.method_dict = deepcopy(method)

        for seq in scene_list:
            print('Working on {}: {}/{}'.format(label, cfg.dataset, seq))
            (export_root / seq).mkdir()
            for n in ['keypoints.h5', 'descriptors.h5', 'scores.h5']: