def run_dataset(data: DataSetBase, input, output) -> None:
    recs_base = data.load_reconstruction(input)
    if len(recs_base) == 0:
        return

    rec_base = recs_base[0]
    tracks_manager = data.load_tracks_manager()
    rec_base.add_correspondences_from_tracks_manager(tracks_manager)

    images = data.images()
    remaining_images = set(images) - set(rec_base.shots)
    gcp = data.load_ground_control_points()
    report = {}
    rec_report = {}
    report["extend_reconstruction"] = [rec_report]
    rec, rec_report["grow"] = reconstruction.grow_reconstruction(
        data,
        tracks_manager,
        rec_base,
        remaining_images,
        gcp,
    )
    rec_report["num_remaining_images"] = len(remaining_images)
    report["not_reconstructed_images"] = list(remaining_images)
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")
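Nearly every example in this list ends with the same pattern: build a small dict of timings and counts, serialize it, and persist it through data.save_report(io.json_dumps(report), name). The standalone sketch below imitates that pattern with only the standard library; save_report, the reports/ directory, and the payload keys are illustrative stand-ins, not the OpenSfM API.

import json
import time
from pathlib import Path


def save_report(report: dict, name: str) -> None:
    # Stand-in for data.save_report(io.json_dumps(report), name): serialize the
    # dict to JSON and write it under a reports directory.
    path = Path("reports") / name
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(report, indent=4))


start = time.time()
# ... run one pipeline step here ...
end = time.time()
save_report({"wall_time": end - start}, "example.json")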
Example #2
    def write_report(self, data, graph,
                     features_time, matches_time, tracks_time):
        tracks, images = matching.tracks_and_images(graph)
        image_graph = bipartite.weighted_projected_graph(graph, images)
        matrix = []
        for im1 in data.images():
            row = []
            for im2 in data.images():
                if im1 in image_graph and im2 in image_graph[im1]:
                    row.append(image_graph[im1][im2]['weight'])
                else:
                    row.append(0)
            matrix.append(row)

        report = {
            "wall_times": {
                "load_features": features_time,
                "load_matches": matches_time,
                "compute_tracks": tracks_time,
            },
            "wall_time": features_time + matches_time + tracks_time,
            "num_images": len(images),
            "num_tracks": len(tracks),
            "viewing_graph": matrix
        }
        data.save_report(io.json_dumps(report), 'tracks.json')
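The viewing-graph matrix above comes from a bipartite tracks graph (images on one side, track ids on the other) projected onto the image nodes. The self-contained sketch below, with made-up image and track names, shows how weighted_projected_graph assigns each image pair a weight equal to the number of tracks they share.

import networkx as nx
from networkx.algorithms import bipartite

# Bipartite tracks graph: each edge connects an image to a track it observes.
graph = nx.Graph()
graph.add_edges_from([
    ("im1.jpg", "t1"), ("im2.jpg", "t1"),
    ("im1.jpg", "t2"), ("im2.jpg", "t2"),
    ("im2.jpg", "t3"), ("im3.jpg", "t3"),
])
images = ["im1.jpg", "im2.jpg", "im3.jpg"]

# Collapse onto the image nodes; edge weights count shared tracks.
image_graph = bipartite.weighted_projected_graph(graph, images)
print(image_graph["im1.jpg"]["im2.jpg"]["weight"])  # 2 shared tracks
print(image_graph["im2.jpg"]["im3.jpg"]["weight"])  # 1 shared track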
Example #3
def detect(args):
    image, data = args

    log.setup()

    need_words = (data.config['matcher_type'] == 'WORDS'
                  or data.config['matching_bow_neighbors'] > 0)
    need_flann = data.config['matcher_type'] == 'FLANN'
    has_words = not need_words or data.words_exist(image)
    has_flann = not need_flann or data.feature_index_exists(image)
    has_features = data.features_exist(image)

    if has_features and has_flann and has_words:
        logger.info('Skip recomputing {} features for image {}'.format(
            data.feature_type().upper(), image))
        return

    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    start = timer()

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        data.load_image(image), data.config)

    fmask = data.load_features_mask(image, p_unmasked)

    p_unsorted = p_unmasked[fmask]
    f_unsorted = f_unmasked[fmask]
    c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning('No features found in image {}'.format(image))
        return

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    data.save_features(image, p_sorted, f_sorted, c_sorted)

    if need_flann:
        index = features.build_flann_index(f_sorted, data.config)
        data.save_feature_index(image, index)
    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config['bow_words_to_match']
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config['bow_matcher_type'])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), 'features/{}.json'.format(image))
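The sorting step above relies on the convention that column 2 of the point array holds the keypoint scale, so np.argsort orders features from smallest to largest and a later truncation keeps the biggest ones. A minimal numpy-only illustration with random data:

import numpy as np

rng = np.random.default_rng(0)
p_unsorted = rng.random((5, 4))      # columns: x, y, size, angle
f_unsorted = rng.random((5, 128))    # descriptors
c_unsorted = rng.random((5, 3))      # colors

order = np.argsort(p_unsorted[:, 2])  # ascending by keypoint size
p_sorted = p_unsorted[order, :]
f_sorted = f_unsorted[order, :]
c_sorted = c_unsorted[order, :]
assert np.all(np.diff(p_sorted[:, 2]) >= 0)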
Example #4
def run_dataset(data):
    """ Compute the SfM reconstruction. """

    tracks_manager = data.load_tracks_manager()
    report, reconstructions = reconstruction.\
        incremental_reconstruction(data, tracks_manager)
    data.save_reconstruction(reconstructions)
    data.save_report(io.json_dumps(report), 'reconstruction.json')
Example #5
    def write_report(self, data, preport, pairs, wall_time):
        report = {
            "wall_time": wall_time,
            "num_pairs": len(pairs),
            "pairs": pairs,
        }
        report.update(preport)
        data.save_report(io.json_dumps(report), 'matches.json')
Example #6
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        report = reconstruction.incremental_reconstruction(data)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))
        data.save_report(io.json_dumps(report), 'reconstruction.json')
Example #7
def write_report(data: DataSetBase, preport, pairs, wall_time):
    report = {
        "wall_time": wall_time,
        "num_pairs": len(pairs),
        "pairs": pairs,
    }
    report.update(preport)
    data.save_report(io.json_dumps(report), "matches.json")
Example #8
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        report = reconstruction.incremental_reconstruction(data)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}, error: {1}, bundle time: {2}\n'.format(
                end - start,
                reconstruction.get_avg_reprojection_error(),
                reconstruction.get_total_bundle_time()))
        data.save_report(io.json_dumps(report), 'reconstruction.json')
Example #9
def run_dataset(data: DataSetBase, input: str, output: str):
    """Reconstruct from a prior reconstruction."""

    tracks_manager = data.load_tracks_manager()
    rec_prior = data.load_reconstruction(input)
    if len(rec_prior) == 0:
        return

    report, rec = reconstruction.reconstruct_from_prior(
        data, tracks_manager, rec_prior[0])
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example #10
def detect(args):
    image, data = args

    log.setup()

    need_words = (data.config["matcher_type"] == "WORDS"
                  or data.config["matching_bow_neighbors"] > 0)
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if has_features and has_words:
        logger.info("Skip recomputing {} features for image {}".format(
            data.feature_type().upper(), image))
        return

    logger.info("Extracting {} features for image {}".format(
        data.feature_type().upper(), image))

    start = timer()

    image_array = data.load_image(image)
    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image,
                                                       image_array))

    fmask = data.load_features_mask(image, p_unmasked)

    p_unsorted = p_unmasked[fmask]
    f_unsorted = f_unmasked[fmask]
    c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    data.save_features(image, p_sorted, f_sorted, c_sorted)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config["bow_matcher_type"])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
Example #11
    def write_report(self, data, wall_time):
        image_reports = []
        for image in data.images():
            txt = data.load_report('features/{}.json'.format(image))
            image_reports.append(io.json_loads(txt))

        report = {
            "wall_time": wall_time,
            "image_reports": image_reports
        }
        data.save_report(io.json_dumps(report), 'features.json')
Example #12
def write_report(data: DataSetBase, wall_time: float):
    image_reports = []
    for image in data.images():
        try:
            txt = data.load_report("features/{}.json".format(image))
            image_reports.append(io.json_loads(txt))
        except IOError:
            logger.warning("No feature report image {}".format(image))

    report = {"wall_time": wall_time, "image_reports": image_reports}
    data.save_report(io.json_dumps(report), "features.json")
Example #13
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        tracks_manager = data.load_tracks_manager()
        report, reconstructions = reconstruction.incremental_reconstruction(
            data, tracks_manager)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))
        data.save_reconstruction(reconstructions)
        data.save_report(io.json_dumps(report), 'reconstruction.json')
Example #14
    def write_report(self, data, wall_time):
        image_reports = []
        for image in data.images():
            try:
                txt = data.load_report('features/{}.json'.format(image))
                image_reports.append(io.json_loads(txt))
            except IOError:
                logger.warning('No feature report for image {}'.format(image))

        report = {"wall_time": wall_time, "image_reports": image_reports}
        data.save_report(io.json_dumps(report), 'features.json')
Example #15
def run_dataset(data: DataSetBase, input: str, output: str):
    """Reconstruct from a prior reconstruction."""

    tracks_manager = data.load_tracks_manager()
    rec_prior = data.load_reconstruction(input)
    if len(rec_prior) > 0:
        report, rec = reconstruction.reconstruct_from_prior(
            data, tracks_manager, rec_prior[0])
        data.save_reconstruction([rec], output)
        data.save_report(io.json_dumps(report), "reconstruction.json")
Example #16
    def write_report(self, data, preport, pairs, wall_time):
        pair_list = []
        for im1, others in pairs.items():
            for im2 in others:
                pair_list.append((im1, im2))

        report = {
            "wall_time": wall_time,
            "num_pairs": len(pair_list),
            "pairs": pair_list,
        }
        report.update(preport)
        data.save_report(io.json_dumps(report), 'matches.json')
Example #18
    def write_report(self, data, wall_time):
        image_reports = []
        for image in data.images():
            try:
                txt = data.load_report('features/{}.json'.format(image))
                image_reports.append(io.json_loads(txt))
            except IOError:
                logger.warning('No feature report for image {}'.format(image))

        report = {
            "wall_time": wall_time,
            "image_reports": image_reports
        }
        data.save_report(io.json_dumps(report), 'features.json')
Example #19
def formulate_graph(args):
    log.setup()

    data, images, scores, criteria, edge_threshold = args
    start = timer()
    G = nx.Graph()
    for i, img1 in enumerate(sorted(images)):
        for j, img2 in enumerate(sorted(images)):
            if j <= i:
                continue
            if img1 in scores and img2 in scores[img1]:
                if criteria == 'inlier-logp':
                    inlier_logp = -np.log(scores[img1][img2])
                    if inlier_logp < -np.log(edge_threshold):
                        G.add_edge(img1, img2, weight=inlier_logp)
                    # if img1 == '0000.jpg' and img2 == '0013.jpg':
                    #     print ('{} - {}  :  {}  /  {}    et: {}'.format(img1, img2, scores[img1][img2], inlier_logp, -np.log(edge_threshold)))
                    # else:
                    #     logger.info('scores: {} / {}    et: {}'.format(scores[img1][img2], inlier_logp, edge_threshold))
                elif 'cost' in criteria:
                    if scores[img1][img2] < edge_threshold:
                        G.add_edge(img1, img2, weight=scores[img1][img2])
                else:
                    if scores[img1][img2] >= edge_threshold:
                        G.add_edge(img1, img2, weight=scores[img1][img2])

    try:
        pagerank = nx.pagerank(G, alpha=0.9)
    except Exception:
        pagerank = {}
        for n in G.nodes():
            pagerank[n] = 1.0
    lcc = nx.clustering(G, nodes=G.nodes(), weight='weight')

    for n in G.nodes():
        G.nodes[n]['pagerank'] = pagerank[n]
        G.nodes[n]['lcc'] = lcc[n]

    end = timer()
    report = {
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), 'similarity-graphs.json')

    # G = nx.minimum_spanning_tree(G)
    # i1 = sorted(images)[0]
    # i2 = sorted(images)[-1]
    # G.add_edge(i1, i2, weight=scores[i1][i2])
    return G
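For reference, the PageRank and clustering attributes computed above can be reproduced on a toy graph with nothing but networkx; the node names and weights here are invented.

import networkx as nx

G = nx.Graph()
G.add_edge("0000.jpg", "0001.jpg", weight=1.5)
G.add_edge("0001.jpg", "0002.jpg", weight=0.7)
G.add_edge("0000.jpg", "0002.jpg", weight=2.0)

pagerank = nx.pagerank(G, alpha=0.9)
lcc = nx.clustering(G, nodes=G.nodes(), weight="weight")
for n in G.nodes():
    # G.nodes[n] is the current networkx accessor for per-node attributes.
    G.nodes[n]["pagerank"] = pagerank[n]
    G.nodes[n]["lcc"] = lcc[n]

print(G.nodes(data=True))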
Example #20
    def write_report(self, data, tracks_manager,
                     features_time, matches_time, tracks_time):
        view_graph = [(k[0], k[1], len(v)) for k, v in tracks_manager.get_all_common_observations_all_pairs().items()]

        report = {
            "wall_times": {
                "load_features": features_time,
                "load_matches": matches_time,
                "compute_tracks": tracks_time,
            },
            "wall_time": features_time + matches_time + tracks_time,
            "num_images": tracks_manager.num_shots(),
            "num_tracks": tracks_manager.num_tracks(),
            "view_graph": view_graph
        }
        data.save_report(io.json_dumps(report), 'tracks.json')
Example #21
    def write_report(self, data, reconstructions, wall_time):
        image_reports = []
        for reconstruction in reconstructions:
            for shot in reconstruction.shots.values():
                try:
                    txt = data.load_report('full_mosaic_reprojection/{}.json'.format(shot.id))
                    image_reports.append(io.json_loads(txt))
                except IOError:
                    logger.warning('No full mosaic report for image {}'.format(shot.id))

        report = {
            "wall_time": wall_time,
            "image_reports": image_reports
        }

        data.save_report(io.json_dumps(report), 'full_mosaic_reprojection.json')
Example #22
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        tracks_manager = data.load_tracks_manager()
        if args.localize:
            report, reconstructions = reconstruction.incremental_reconstruction(
                data, tracks_manager, True)
        else:
            report, reconstructions = reconstruction.incremental_reconstruction(
                data, tracks_manager)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))
        if not args.localize:
            data.save_reconstruction(reconstructions)
            data.save_report(io.json_dumps(report), 'reconstruction.json')
        else:
            print("Finished Localization!")
Example #23
def vt_rankings(args):
    log.setup()

    images, data = args
    libvot = data.config['libvot']

    start = timer()
    subprocess.Popen("ls -d {}/images/* > {}/vt_image_list.txt".format(data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("{}/build/bin/libvot_feature -thread_num 10 -output_folder {}/sift/ {}/vt_image_list.txt".format(libvot, data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("ls -d {}/sift/*.sift > {}/vt_sift_list.txt".format(data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()
    subprocess.Popen("{}/build/bin/image_search {}/vt_sift_list.txt {}/vocab_out".format(libvot, data.data_path, data.data_path), shell=True, stdout=subprocess.PIPE).stdout.read()

    end = timer()
    report = {
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report),
                     'rankings.json')
Example #24
def run_dataset(data: DataSetBase,
                algorithm: reconstruction.ReconstructionAlgorithm) -> None:
    """Compute the SfM reconstruction."""

    tracks_manager = data.load_tracks_manager()

    if algorithm == reconstruction.ReconstructionAlgorithm.INCREMENTAL:
        report, reconstructions = reconstruction.incremental_reconstruction(
            data, tracks_manager)
    elif algorithm == reconstruction.ReconstructionAlgorithm.TRIANGULATION:
        report, reconstructions = reconstruction.triangulation_reconstruction(
            data, tracks_manager)
    else:
        raise RuntimeError(
            f"Unsupported algorithm for reconstruction {algorithm}")

    data.save_reconstruction(reconstructions)
    data.save_report(io.json_dumps(report), "reconstruction.json")
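The dispatch on ReconstructionAlgorithm above is a plain enum comparison with a RuntimeError fallback. The toy sketch below reproduces only that control flow; the enum values match the example, while run() and the returned labels are placeholders.

from enum import Enum


class ReconstructionAlgorithm(Enum):
    INCREMENTAL = "incremental"
    TRIANGULATION = "triangulation"


def run(algorithm: ReconstructionAlgorithm) -> str:
    # Dispatch on the enum value; unknown values raise, as in the example.
    if algorithm == ReconstructionAlgorithm.INCREMENTAL:
        return "incremental pipeline"
    elif algorithm == ReconstructionAlgorithm.TRIANGULATION:
        return "triangulation pipeline"
    raise RuntimeError(f"Unsupported algorithm for reconstruction {algorithm}")


print(run(ReconstructionAlgorithm.INCREMENTAL))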
Example #25
def write_report(data, tracks_manager, features_time, matches_time,
                 tracks_time):
    view_graph = [
        (k[0], k[1], v)
        for k, v in tracks_manager.get_all_pairs_connectivity().items()
    ]

    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": tracks_manager.num_shots(),
        "num_tracks": tracks_manager.num_tracks(),
        "view_graph": view_graph,
    }
    data.save_report(io.json_dumps(report), "tracks.json")
Example #26
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        graph = data.load_tracks_graph()
        reconstruction.output_pose_log = data.config["output_pose_log"]
        reconstruction.pose_fix_type = data.config["pose_fix_type"]
        report, reconstructions = reconstruction.\
            incremental_reconstruction(data, graph)
        end = time.time()

        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))
        # reconstructions = reconstruction.merge_reconstructions(reconstructions, data.config)
        # for k, r in enumerate(reconstructions):
        #     logger.info("Reconstruction {}: {} images, {} points".format(
        #         k, len(r.shots), len(r.points)))
        # logger.info("{} partial reconstructions in total.".format(
        #     len(reconstructions)))

        data.save_reconstruction(reconstructions)
        data.save_report(io.json_dumps(report), 'reconstruction.json')
Example #27
def detect(args):
    log.setup()

    image, data = args
    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    if not data.feature_index_exists(image):
        start = timer()
        mask = data.load_combined_mask(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config['preemptive_max']
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.load_image(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config['matcher_type'] == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)

        end = timer()
        report = {
            "image": image,
            "num_features": len(p_sorted),
            "wall_time": end - start,
        }
        data.save_report(io.json_dumps(report),
                         'features/{}.json'.format(image))
Example #28
def detect(args):
    log.setup()

    image, data = args
    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    if not data.feature_index_exists(image):
        start = timer()
        mask = data.mask_as_array(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config['preemptive_max']
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.image_as_array(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config['matcher_type'] == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)

        end = timer()
        report = {
            "image": image,
            "num_features": len(p_sorted),
            "wall_time": end - start,
        }
        data.save_report(io.json_dumps(report),
                         'features/{}.json'.format(image))
Example #29
def write_report(data, graph, features_time, matches_time, tracks_time):
    tracks, images = matching.tracks_and_images(graph)
    image_graph = bipartite.weighted_projected_graph(graph, images)
    view_graph = []
    for im1 in data.images():
        for im2 in data.images():
            if im1 in image_graph and im2 in image_graph[im1]:
                weight = image_graph[im1][im2]['weight']
                view_graph.append((im1, im2, weight))

    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": len(images),
        "num_tracks": len(tracks),
        "view_graph": view_graph
    }
    data.save_report(io.json_dumps(report), 'yan.json')
Example #30
    def write_report(self, data, graph,
                     features_time, matches_time, tracks_time):
        tracks, images = matching.tracks_and_images(graph)
        image_graph = bipartite.weighted_projected_graph(graph, images)
        view_graph = []
        for im1 in data.images():
            for im2 in data.images():
                if im1 in image_graph and im2 in image_graph[im1]:
                    weight = image_graph[im1][im2]['weight']
                    view_graph.append((im1, im2, weight))

        report = {
            "wall_times": {
                "load_features": features_time,
                "load_matches": matches_time,
                "compute_tracks": tracks_time,
            },
            "wall_time": features_time + matches_time + tracks_time,
            "num_images": len(images),
            "num_tracks": len(tracks),
            "view_graph": view_graph
        }
        data.save_report(io.json_dumps(report), 'tracks.json')
Example #31
def detect(args: Tuple[str, DataSetBase]):
    image, data = args

    log.setup()

    need_words = (data.config["matcher_type"] == "WORDS"
                  or data.config["matching_bow_neighbors"] > 0)
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if has_features and has_words:
        logger.info("Skip recomputing {} features for image {}".format(
            data.feature_type().upper(), image))
        return

    logger.info("Extracting {} features for image {}".format(
        data.feature_type().upper(), image))

    start = timer()

    image_array = data.load_image(image)
    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image,
                                                       image_array))

    # Load segmentation and bake it in the data
    if data.config["features_bake_segmentation"]:
        exif = data.load_exif(image)
        panoptic_data = [None, None]
        for i, p_data in enumerate(
            [data.load_segmentation(image),
             data.load_instances(image)]):
            if p_data is None:
                continue
            new_height, new_width = p_data.shape
            ps = upright.opensfm_to_upright(
                p_unmasked[:, :2],
                exif["width"],
                exif["height"],
                exif["orientation"],
                new_width=new_width,
                new_height=new_height,
            ).astype(int)
            panoptic_data[i] = p_data[ps[:, 1], ps[:, 0]]
        s_unsorted, i_unsorted = panoptic_data
        p_unsorted = p_unmasked
        f_unsorted = f_unmasked
        c_unsorted = c_unmasked
    # Load segmentation, make a mask from it and apply it
    else:
        s_unsorted, i_unsorted = None, None
        fmask = data.load_features_mask(image, p_unmasked)
        p_unsorted = p_unmasked[fmask]
        f_unsorted = f_unmasked[fmask]
        c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    # pyre-fixme[16]: `None` has no attribute `__getitem__`.
    s_sorted = s_unsorted[order] if s_unsorted is not None else None
    i_sorted = i_unsorted[order] if i_unsorted is not None else None
    data.save_features(image, p_sorted, f_sorted, c_sorted, s_sorted, i_sorted)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config["bow_matcher_type"])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
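The bake-segmentation branch above samples the label map at each feature's pixel location using numpy fancy indexing (rows indexed by y, columns by x). A synthetic, runnable illustration of just that lookup:

import numpy as np

segmentation = np.array([
    [0, 0, 1],
    [2, 1, 1],
])                                         # H x W label map
ps = np.array([[2, 0], [0, 1], [1, 1]])    # per-feature (x, y) pixel coordinates

labels = segmentation[ps[:, 1], ps[:, 0]]  # row = y, column = x
print(labels)                              # [1 2 1]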
Example #32
    def write_report(self, data, wall_time):
        report = {
            "wall_time": wall_time
        }
        data.save_report(io.json_dumps(report), 'rankings.json')
Example #33
def detect(
    image: str,
    image_array: np.ndarray,
    segmentation_array: Optional[np.ndarray],
    instances_array: Optional[np.ndarray],
    data: DataSetBase,
    force: bool = False,
) -> None:
    log.setup()

    need_words = (
        data.config["matcher_type"] == "WORDS"
        or data.config["matching_bow_neighbors"] > 0
    )
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if not force and has_features and has_words:
        logger.info(
            "Skip recomputing {} features for image {}".format(
                data.feature_type().upper(), image
            )
        )
        return

    logger.info(
        "Extracting {} features for image {}".format(data.feature_type().upper(), image)
    )

    start = timer()

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image, image_array)
    )

    # Load segmentation and bake it in the data
    if data.config["features_bake_segmentation"]:
        exif = data.load_exif(image)
        s_unsorted, i_unsorted = bake_segmentation(
            image_array, p_unmasked, segmentation_array, instances_array, exif
        )
        p_unsorted = p_unmasked
        f_unsorted = f_unmasked
        c_unsorted = c_unmasked
    # Load segmentation, make a mask from it and apply it
    else:
        s_unsorted, i_unsorted = None, None
        fmask = masking.load_features_mask(data, image, p_unmasked)
        p_unsorted = p_unmasked[fmask]
        f_unsorted = f_unmasked[fmask]
        c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    if s_unsorted is not None:
        semantic_data = features.SemanticData(
            s_unsorted[order],
            i_unsorted[order] if i_unsorted is not None else None,
            data.segmentation_labels(),
        )
    else:
        semantic_data = None
    features_data = features.FeaturesData(p_sorted, f_sorted, c_sorted, semantic_data)
    data.save_features(image, features_data)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(
            f_sorted, n_closest, data.config["bow_matcher_type"]
        )
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
Example #34
    def save_exif(self, image, data):
        io.mkdir_p(self.__exif_path())
        with open(self.__exif_file(image), 'w') as fout:
            fout.write(io.json_dumps(data))
Example #35
    def write_report(self, data, report):
        data.save_report(io.json_dumps(report), 'classify_features.json')
Example #36
    def save_navigation_graph(self, navigation_graphs):
        with open(self.__navigation_graph_file(), 'w') as fout:
            fout.write(io.json_dumps(navigation_graphs))
Example #37
    def save_reconstruction(self, reconstruction, filename=None, indent=4):
        with open(self.__reconstruction_file(filename), 'w') as fout:
            fout.write(io.json_dumps(reconstruction))
Example #38
    def save_exif(self, image, data):
        with open(self.__exif_file(image), 'w') as fout:
            fout.write(io.json_dumps(data))
Example #39
    def save_camera_models(self, camera_models):
        """Save camera models data"""
        with open(self.__camera_models_file(), 'w') as fout:
            fout.write(io.json_dumps(camera_models))
Example #40
    def save_camera_models(self, camera_models):
        """Save camera models data"""
        with open(self.__camera_models_file(), 'w') as fout:
            obj = io.cameras_to_json(camera_models)
            fout.write(io.json_dumps(obj))