Example #1
def load_mask(self, data: DataSetBase, image):
    points, _, _, segmentations, _ = self._load_all_data_unmasked(data, image)
    if data.config["features_bake_segmentation"] and segmentations is not None:
        ignore_values = set(data.segmentation_ignore_values(image))
        return [
            False if segmentations[i] in ignore_values else True
            for i in range(len(segmentations))
        ]
    else:
        if points is None:
            return None
        return data.load_features_mask(image, points[:, :2])
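The list comprehension above keeps a keypoint exactly when its segmentation label is not in the ignore set. A minimal vectorized sketch of the same computation with NumPy, using made-up array values rather than the dataset objects from the example:

import numpy as np

# Illustrative per-keypoint segmentation labels and the labels to mask out
# (values are made up; in the example they come from the dataset).
segmentations = np.array([0, 3, 3, 7, 1])
ignore_values = {3, 7}

# True where the label is NOT ignored, matching the list comprehension above.
keep = np.isin(segmentations, list(ignore_values), invert=True)
print(keep.tolist())  # [True, False, False, False, True]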
Example #2
def load_mask(self, data: DataSetBase, image: str) -> Optional[np.ndarray]:
    all_features_data = self._load_all_data_unmasked(data, image)
    if not all_features_data:
        return None
    if (data.config["features_bake_segmentation"]
            and all_features_data.semantic is not None):
        # pyre-fixme[16]: `Optional` has no attribute `segmentation`
        segmentations = all_features_data.semantic.segmentation
        ignore_values = set(data.segmentation_ignore_values(image))
        return np.array([
            False if segmentations[i] in ignore_values else True
            for i in range(len(segmentations))
        ])
    else:
        return data.load_features_mask(image, all_features_data.points[:, :2])
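Example #2 reads the same information through a container object exposing .points and .semantic.segmentation instead of unpacking a tuple. A simplified, hypothetical stand-in for such a container (the real FeaturesData / SemanticData classes used in these examples carry more state and behaviour):

from dataclasses import dataclass
from typing import List, Optional

import numpy as np

@dataclass
class SemanticDataSketch:
    segmentation: np.ndarray           # per-keypoint segmentation label
    instances: Optional[np.ndarray]    # per-keypoint instance id, if available
    labels: List[str]                  # label names (simplified)

@dataclass
class FeaturesDataSketch:
    points: np.ndarray                 # (N, 4) keypoints; Example #3 treats column 2 as the size
    descriptors: Optional[np.ndarray]  # (N, D) feature descriptors
    colors: np.ndarray                 # (N, 3) colors sampled at the keypoints
    semantic: Optional[SemanticDataSketch]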
Example #3
def detect(
    image: str,
    image_array: np.ndarray,
    segmentation_array: Optional[np.ndarray],
    instances_array: Optional[np.ndarray],
    data: DataSetBase,
) -> None:
    log.setup()

    need_words = (data.config["matcher_type"] == "WORDS"
                  or data.config["matching_bow_neighbors"] > 0)
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if has_features and has_words:
        logger.info("Skip recomputing {} features for image {}".format(
            data.feature_type().upper(), image))
        return

    logger.info("Extracting {} features for image {}".format(
        data.feature_type().upper(), image))

    start = timer()

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image,
                                                       image_array))

    # Load the segmentation and bake it into the data
    if data.config["features_bake_segmentation"]:
        exif = data.load_exif(image)
        s_unsorted, i_unsorted = bake_segmentation(p_unmasked,
                                                   segmentation_array,
                                                   instances_array, exif)
        p_unsorted = p_unmasked
        f_unsorted = f_unmasked
        c_unsorted = c_unmasked
    # Load segmentation, make a mask from it and apply it
    else:
        s_unsorted, i_unsorted = None, None
        fmask = data.load_features_mask(image, p_unmasked)
        p_unsorted = p_unmasked[fmask]
        f_unsorted = f_unmasked[fmask]
        c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    if s_unsorted is not None and i_unsorted is not None:
        semantic_data = features.SemanticData(s_unsorted[order],
                                              i_unsorted[order],
                                              data.segmentation_labels())
    else:
        semantic_data = None
    features_data = features.FeaturesData(p_sorted, f_sorted, c_sorted,
                                          semantic_data)
    data.save_features(image, features_data)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(f_sorted, n_closest,
                                          data.config["bow_matcher_type"])
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
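The sorting step in Example #3 orders every per-keypoint array with a single argsort over the size column, so points, descriptors and colors stay aligned. A toy sketch of that pattern with made-up values:

import numpy as np

# Made-up parallel arrays: one row per keypoint.
p = np.array([[0.1, 0.2, 3.0, 0.0],
              [0.4, 0.5, 1.0, 0.0],
              [0.7, 0.8, 2.0, 0.0]])                    # x, y, size, angle
f = np.array([[10.0], [20.0], [30.0]])                  # descriptors (1-D here for brevity)
c = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])   # colors

# One argsort over the size column, applied to every parallel array.
order = np.argsort(p[:, 2])
p_sorted, f_sorted, c_sorted = p[order], f[order], c[order]
print(order.tolist())  # [1, 2, 0]: smallest keypoint first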