    def predict(self, chips, windows):
        """Return predictions for a chip using model.

        Args:
            chips: [[height, width, channels], ...] numpy array of chips
            windows: List of boxes that are the windows aligned with the chips.

        Returns:
            Labels object containing predictions
        """
        if self.learner is None:
            self.load_model()

        batch_out = self.learner.numpy_predict(chips, raw_out=False)
        labels = ObjectDetectionLabels.make_empty()

        for chip_ind, out in enumerate(batch_out):
            window = windows[chip_ind]
            boxes = out['boxes']
            class_ids = out['class_ids']
            scores = out['scores']
            boxes = ObjectDetectionLabels.local_to_global(boxes, window)
            labels += ObjectDetectionLabels(boxes, class_ids, scores=scores)

        return labels
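
# Inside predict, ObjectDetectionLabels.local_to_global shifts each chip-local
# box into scene coordinates using the chip's window. A minimal sketch of that
# translation, assuming boxes are numpy rows of [ymin, xmin, ymax, xmax] and
# the window is identified by its top-left offset (these layout details are
# assumptions, not taken from the snippet above):
import numpy as np

def local_to_global_sketch(npboxes, window_ymin, window_xmin):
    # Hypothetical helper: the real local_to_global takes a window object
    # rather than raw offsets.
    offset = np.array([window_ymin, window_xmin, window_ymin, window_xmin])
    return npboxes + offset

# A box at rows 10-30, cols 20-50 inside a chip whose window starts at
# (ymin=300, xmin=700) maps to rows 310-330, cols 720-750 in the scene.
print(local_to_global_sketch(np.array([[10, 20, 30, 50]]), 300, 700))
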
Example No. 2
def _make_chip_pos_windows(image_extent, label_store, chip_size):
    pos_windows = []
    boxes = label_store.get_labels().get_boxes()
    done_boxes = set()

    # Get a random window around each box. If a box was previously included
    # in a window, then it is skipped.
    for box in boxes:
        if box.tuple_format() not in done_boxes:
            # If this object is bigger than the chip, don't use this box.
            if chip_size < box.get_width() or chip_size < box.get_height():
                log.warning('Label is larger than chip size: {}. '
                            'Skipping this label.'.format(box.tuple_format()))
                continue

            window = box.make_random_square_container(chip_size)
            pos_windows.append(window)

            # Get boxes that lie completely within window
            window_boxes = label_store.get_labels(window=window)
            window_boxes = ObjectDetectionLabels.get_overlapping(
                window_boxes, window, ioa_thresh=1.0)
            window_boxes = window_boxes.get_boxes()
            window_boxes = [box.tuple_format() for box in window_boxes]
            done_boxes.update(window_boxes)

    return pos_windows
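
# The positive-window logic above relies on Box.make_random_square_container
# to place a chip-sized window at a random offset that still fully contains
# the label box. A hedged standalone sketch of that placement, assuming
# (ymin, xmin, ymax, xmax) coordinates and no clipping to the scene extent:
import random

def random_square_container_sketch(ymin, xmin, ymax, xmax, chip_size):
    # Hypothetical stand-in for Box.make_random_square_container.
    assert ymax - ymin <= chip_size and xmax - xmin <= chip_size
    # The window's top-left corner can be anywhere that still covers the box.
    rand_ymin = random.randint(ymax - chip_size, ymin)
    rand_xmin = random.randint(xmax - chip_size, xmin)
    return (rand_ymin, rand_xmin, rand_ymin + chip_size, rand_xmin + chip_size)

# A 40x60 box easily fits somewhere inside a 300-pixel chip.
print(random_square_container_sketch(100, 200, 140, 260, 300))
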
Example No. 3
    def get_train_labels(self, window, scene):
        window_labels = scene.ground_truth_label_source.get_labels(
            window=window)
        return ObjectDetectionLabels.get_overlapping(
            window_labels,
            window,
            ioa_thresh=self.config.chip_options.ioa_thresh,
            clip=True)
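
# get_train_labels keeps only boxes whose intersection-over-area (ioa) with
# the chip window meets chip_options.ioa_thresh. A minimal sketch of that
# measure for (ymin, xmin, ymax, xmax) tuples; an illustration of the idea,
# not the library's implementation:
def ioa_sketch(box, window):
    ymin = max(box[0], window[0])
    xmin = max(box[1], window[1])
    ymax = min(box[2], window[2])
    xmax = min(box[3], window[3])
    inter = max(0, ymax - ymin) * max(0, xmax - xmin)
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    # ioa == 1.0 means the box lies entirely inside the window.
    return inter / box_area if box_area > 0 else 0.0

# Half of this 10x10 box hangs outside the window, so its ioa is 0.5.
print(ioa_sketch((0, 0, 10, 10), (0, 5, 100, 100)))
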
    def __init__(self, vector_source, extent=None):
        """Constructor.

        Args:
            vector_source: (VectorSource)
            extent: Box used to filter the labels by extent
        """
        self.labels = ObjectDetectionLabels.from_geojson(
            vector_source.get_geojson(), extent=extent)
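
# A minimal FeatureCollection of the kind from_geojson consumes. Whether the
# class comes from a 'class_id' property, and in what coordinate system the
# geometry arrives, depends on the vector source configuration, so treat the
# property name below as an assumption rather than the library's contract.
geojson = {
    'type': 'FeatureCollection',
    'features': [{
        'type': 'Feature',
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]]
        },
        'properties': {'class_id': 1}
    }]
}
# labels = ObjectDetectionLabels.from_geojson(geojson, extent=some_box)
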
Example No. 5
def make_neg_windows(raster_source, label_store, chip_size, nb_windows,
                     max_attempts, filter_windows):
    extent = raster_source.get_extent()
    neg_windows = []
    for _ in range(max_attempts):
        # Rejection-sample a window that passes the caller-supplied filter,
        # falling back to the last draw if the attempt budget runs out.
        for _ in range(max_attempts):
            window = extent.make_random_square(chip_size)
            if any(filter_windows([window])):
                break
        chip = raster_source.get_chip(window)
        labels = ObjectDetectionLabels.get_overlapping(
            label_store.get_labels(), window, ioa_thresh=0.2)

        # If the chip has no overlapping labels and is not blank, keep the window
        if len(labels) == 0 and np.sum(chip.ravel()) > 0:
            neg_windows.append(window)

        if len(neg_windows) == nb_windows:
            break

    return list(neg_windows)
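
# The inner loop of make_neg_windows is plain rejection sampling: draw random
# windows until one passes the caller-supplied filter, falling back to the
# last draw when the attempt budget runs out. A hypothetical helper that
# isolates the same pattern:
import random

def sample_until(make_candidate, accept, max_attempts):
    candidate = None
    for _ in range(max_attempts):
        candidate = make_candidate()
        if accept(candidate):
            break
    return candidate

# Example: draw digits until we hit an even one (or give up after 100 tries).
print(sample_until(lambda: random.randint(0, 9), lambda x: x % 2 == 0, 100))
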
    def write_sample(self, sample: DataSample):
        """
        This writes a training or validation sample to
        (train|valid)/img/{scene_id}-{ind}.png and updates
        some COCO data structures.
        """
        split = 'train' if sample.is_train else 'valid'
        split_dir = join(self.sample_dir, split)
        img_dir = join(split_dir, 'img')
        make_dir(img_dir)
        img_fn = '{}-{}.png'.format(sample.scene_id, self.sample_ind)
        img_path = join(img_dir, img_fn)
        save_img(sample.chip, img_path)

        images = self.splits[split]['images']
        annotations = self.splits[split]['annotations']

        images.append({
            'file_name': img_fn,
            'id': self.sample_ind,
            'height': sample.chip.shape[0],
            'width': sample.chip.shape[1]
        })

        npboxes = sample.labels.get_npboxes()
        npboxes = ObjectDetectionLabels.global_to_local(npboxes, sample.window)
        for box_ind, (box, class_id) in enumerate(
                zip(npboxes, sample.labels.get_class_ids())):
            bbox = [box[1], box[0], box[3] - box[1], box[2] - box[0]]
            bbox = [int(i) for i in bbox]
            annotations.append({
                'id': '{}-{}'.format(self.sample_ind, box_ind),
                'image_id': self.sample_ind,
                'bbox': bbox,
                'category_id': int(class_id)
            })

        self.sample_ind += 1
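
# The bbox reshuffling in write_sample converts a chip-local
# [ymin, xmin, ymax, xmax] row into COCO's [x, y, width, height] convention.
# The same conversion as a small hypothetical helper:
def to_coco_bbox(ymin, xmin, ymax, xmax):
    return [int(xmin), int(ymin), int(xmax - xmin), int(ymax - ymin)]

# A box spanning rows 10-30 and cols 20-50 becomes x=20, y=10, w=30, h=20.
print(to_coco_bbox(10, 20, 30, 50))
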
    def get_labels(self, window=None):
        if window is None:
            return self.labels

        return ObjectDetectionLabels.get_overlapping(self.labels, window)
Example No. 8
    def post_process_predictions(self, labels, scene):
        return ObjectDetectionLabels.prune_duplicates(
            labels,
            score_thresh=self.config.predict_options.score_thresh,
            merge_thresh=self.config.predict_options.merge_thresh)
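
# prune_duplicates merges overlapping predictions using a score threshold and
# a merge threshold. The library's exact strategy isn't shown here; a generic
# greedy non-maximum-suppression sketch conveys the idea (assumed behavior,
# not the actual implementation):
import numpy as np

def nms_sketch(boxes, scores, score_thresh, merge_thresh):
    # Drop low-score boxes, then repeatedly keep the highest-scoring box and
    # discard others whose IoU with it exceeds merge_thresh. Boxes are rows of
    # [ymin, xmin, ymax, xmax]; returns indices of the kept boxes.
    keep_mask = scores >= score_thresh
    idxs = np.flatnonzero(keep_mask)[np.argsort(-scores[keep_mask])]
    kept = []
    while len(idxs) > 0:
        best, rest = idxs[0], idxs[1:]
        kept.append(best)
        ymin = np.maximum(boxes[best, 0], boxes[rest, 0])
        xmin = np.maximum(boxes[best, 1], boxes[rest, 1])
        ymax = np.minimum(boxes[best, 2], boxes[rest, 2])
        xmax = np.minimum(boxes[best, 3], boxes[rest, 3])
        inter = np.clip(ymax - ymin, 0, None) * np.clip(xmax - xmin, 0, None)
        area_best = (boxes[best, 2] - boxes[best, 0]) * (boxes[best, 3] - boxes[best, 1])
        area_rest = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_best + area_rest - inter)
        idxs = rest[iou <= merge_thresh]
    return np.array(kept)

boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]])
scores = np.array([0.9, 0.8, 0.4])
print(nms_sketch(boxes, scores, score_thresh=0.5, merge_thresh=0.5))  # -> [0]
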
    def empty_labels(self):
        return ObjectDetectionLabels.make_empty()

    def get_labels(self):
        vector_source = GeoJSONVectorSourceConfig(
            uri=self.uri, default_class_id=None).build(
                self.class_config, self.crs_transformer)
        return ObjectDetectionLabels.from_geojson(vector_source.get_geojson())