Example No. 1
    def run_evaluation(self):
        progress = Progress('metric evaluation', self._project_gt.total_items)
        for ds_name in self._project_gt.datasets.keys():
            ds_gt = self._project_gt.datasets.get(ds_name)
            ds_pred = self._project_pred.datasets.get(ds_name)

            for sample_name in ds_gt:
                try:
                    ann_gt = Annotation.load_json_file(ds_gt.get_ann_path(sample_name), self._project_gt.meta)
                    ann_pred = Annotation.load_json_file(ds_pred.get_ann_path(sample_name), self._project_pred.meta)
                    self._metric.add_pair(ann_gt, ann_pred)
                except ValueError as e:
                    logger.warning('An error has occurred ({}). Sample "{}" in dataset "{}" will be skipped'
                                   .format(str(e), sample_name, ds_gt.name))
                progress.iter_done_report()
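A minimal standalone sketch (not part of the original example) of the same ground-truth/prediction pairing that run_evaluation() performs, assuming the Supervisely SDK is importable as supervisely_lib; the project paths and dataset name are placeholders.

import supervisely_lib as sly

# Open the ground-truth and prediction projects read-only (hypothetical paths).
project_gt = sly.Project('/data/project_gt', sly.OpenMode.READ)
project_pred = sly.Project('/data/project_pred', sly.OpenMode.READ)

# Pair up annotations of the same item in both projects (hypothetical dataset name).
ds_gt = project_gt.datasets.get('ds1')
ds_pred = project_pred.datasets.get('ds1')
for item_name in ds_gt:
    ann_gt = sly.Annotation.load_json_file(ds_gt.get_ann_path(item_name), project_gt.meta)
    ann_pred = sly.Annotation.load_json_file(ds_pred.get_ann_path(item_name), project_pred.meta)
    print(item_name, len(ann_gt.labels), len(ann_pred.labels))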
Example No. 2
    def run_inference(self):
        inference_mode = InferenceModeFactory.create(
            self._inference_mode_config, self._in_project.meta,
            self._single_image_inference)
        out_project = Project(
            os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name),
            OpenMode.CREATE)
        out_project.set_meta(inference_mode.out_meta)

        progress_bar = Progress('Model applying: ',
                                self._in_project.total_items)
        for in_dataset in self._in_project:
            out_dataset = out_project.create_dataset(in_dataset.name)
            for in_item_name in in_dataset:
                # Use output project meta so that we get an annotation that is already in the context of the output
                # project (with added object classes etc).
                in_item_paths = in_dataset.get_item_paths(in_item_name)
                in_img = sly_image.read(in_item_paths.img_path)
                in_ann = Annotation.load_json_file(in_item_paths.ann_path,
                                                   inference_mode.out_meta)
                logger.trace('Will process image',
                             extra={
                                 'dataset_name': in_dataset.name,
                                 'image_name': in_item_name
                             })
                inference_annotation = inference_mode.infer_annotate(
                    in_img, in_ann)
                out_dataset.add_item_file(in_item_name,
                                          in_item_paths.img_path,
                                          ann=inference_annotation)

                progress_bar.iter_done_report()

        report_inference_finished()
Example No. 3
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                response_queue):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)
    out_meta_json = inference_mode.out_meta.to_json()

    req = ''
    while req is not None:
        req = request_queue.get()
        if req is not None:
            in_img = sly_image.read(req.item_paths.img_path)
            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate(in_img, in_ann)
            resp = InferenceResponse(ds_name=req.ds_name,
                                     item_name=req.item_name,
                                     item_paths=req.item_paths,
                                     ann_json=ann.to_json(),
                                     meta_json=out_meta_json)
            response_queue.put(resp)
        request_queue.task_done()
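A hypothetical driver sketch for the worker above. InferenceRequest and the way requests are built are assumptions (the real request type is not shown); only the queue/None-sentinel protocol mirrors the docstring. It also assumes the callables passed in are picklable for multiprocessing.

import multiprocessing as mp
from collections import namedtuple

# Hypothetical request record with the fields the worker reads.
InferenceRequest = namedtuple('InferenceRequest', ['ds_name', 'item_name', 'item_paths'])

def drive_single_inference(requests, inference_initializer, inference_mode_config, in_project_meta_json):
    # The worker calls request_queue.task_done(), so a JoinableQueue is used here.
    request_queue = mp.JoinableQueue()
    response_queue = mp.Queue()
    worker = mp.Process(target=single_inference_process_fn,
                        args=(inference_initializer, inference_mode_config,
                              in_project_meta_json, request_queue, response_queue))
    worker.start()
    for req in requests:
        request_queue.put(req)
    request_queue.put(None)  # None tells the worker loop to exit.
    # One response is produced per request; collect them before joining the worker.
    responses = [response_queue.get() for _ in range(len(requests))]
    worker.join()
    return responses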
Example No. 4
def save_project_as_pascal_voc_detection(save_path, project: Project):

    # Create root pascal 'datasets' folders
    for dataset in project.datasets:
        pascal_dataset_path = os.path.join(save_path, dataset.name)
        pascal_dataset_relative_path = os.path.relpath(pascal_dataset_path,
                                                       save_path)

        images_dir = os.path.join(pascal_dataset_path, 'JPEGImages')
        anns_dir = os.path.join(pascal_dataset_path, 'Annotations')
        lists_dir = os.path.join(pascal_dataset_path, 'ImageSets/Layout')

        fs_utils.mkdir(pascal_dataset_path)
        for subdir in [
                'ImageSets',  # Train list, Val list, etc.
                'ImageSets/Layout',
                'Annotations',
                'JPEGImages'
        ]:
            fs_utils.mkdir(os.path.join(pascal_dataset_path, subdir))

        samples_by_tags = defaultdict(list)  # TRAIN: [img_1, img2, ..]

        for item_name in dataset:
            img_path, ann_path = dataset.get_item_paths(item_name)
            no_ext_name = fs_utils.get_file_name(item_name)
            pascal_img_path = os.path.join(images_dir,
                                           no_ext_name + OUT_IMG_EXT)
            pascal_ann_path = os.path.join(anns_dir, no_ext_name + XML_EXT)

            if item_name.endswith(OUT_IMG_EXT):
                fs_utils.copy_file(img_path, pascal_img_path)
            else:
                img = image_utils.read(img_path)
                image_utils.write(pascal_img_path, img)

            ann = Annotation.load_json_file(ann_path,
                                            project_meta=project.meta)

            # Read tags for images lists generation
            for tag in ann.img_tags:
                samples_by_tags[tag.name].append(
                    (no_ext_name, len(ann.labels)))

            writer = pascal_voc_writer.Writer(
                path=pascal_dataset_relative_path,
                width=ann.img_size[1],
                height=ann.img_size[0])

            for label in ann.labels:
                obj_class = label.obj_class
                rect: Rectangle = label.geometry.to_bbox()
                writer.addObject(name=obj_class.name,
                                 xmin=rect.left,
                                 ymin=rect.top,
                                 xmax=rect.right,
                                 ymax=rect.bottom)
            writer.save(pascal_ann_path)

        save_images_lists(lists_dir, samples_by_tags)
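A hypothetical invocation of the exporter above, assuming a local Supervisely project directory and the SDK importable as supervisely_lib (both paths are placeholders).

import supervisely_lib as sly

project = sly.Project('/data/my_project', sly.OpenMode.READ)
save_project_as_pascal_voc_detection('/data/my_project_pascal', project)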
Example No. 5
def samples_by_tags(required_tags, project):
    """
    Split samples from project by tags
    :param required_tags: list of tags names
    :param project: supervisely `Project` class object
    :return:
    """
    img_annotations_groups = defaultdict(list)
    for dataset in project:
        for item_name in dataset:
            item_paths = dataset.get_item_paths(item_name)
            ann = Annotation.load_json_file(path=item_paths.ann_path,
                                            project_meta=project.meta)
            img_tags = ann.img_tags
            for required_tag in required_tags:
                if img_tags.has_key(required_tag):
                    # TODO migrate to ItemPath objects for img_annotations_groups
                    img_annotations_groups[required_tag].append(
                        (item_paths.img_path, item_paths.ann_path))
    return img_annotations_groups
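A hypothetical usage sketch for the helper above: group the project items by train/val image tags and report the split sizes (the project path and tag names are placeholders).

import supervisely_lib as sly

project = sly.Project('/data/my_project', sly.OpenMode.READ)
groups = samples_by_tags(['train', 'val'], project)
for tag_name, img_ann_pairs in groups.items():
    print(tag_name, len(img_ann_pairs))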
Example No. 6
def ensure_samples_nonempty(samples, tag_name, project_meta):
    """

    Args:
        samples: list of pairs (image path, annotation path).
        tag_name: tag name for messages.
        project_meta: input project meta object.
    Returns: None

    """
    if len(samples) < 1:
        raise RuntimeError(
            'There are no annotations with tag "{}"'.format(tag_name))

    for _, ann_path in samples:
        ann = Annotation.load_json_file(ann_path, project_meta)
        if len(ann.labels) > 0:
            return

    raise RuntimeError(
        'There are no objects in annotations with tag "{}"'.format(tag_name))
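A hypothetical sketch combining the two helpers above: split the project by tags, then fail fast if the "train" split is empty or has no annotated objects (project path and tag name are placeholders).

import supervisely_lib as sly

project = sly.Project('/data/my_project', sly.OpenMode.READ)
groups = samples_by_tags(['train'], project)
ensure_samples_nonempty(groups['train'], 'train', project.meta)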
Example No. 7
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                result_meta_queue, progress_queue, project):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)

    project_meta_sent = False
    req = ''
    while req is not None:
        req = request_queue.get()
        if req is not None:
            # Send the resulting project meta to the parent process to make sure we only write the meta JSON once.
            if not project_meta_sent:
                try:
                    result_meta_queue.put(inference_mode.out_meta.to_json(),
                                          block=False)
                except queue.Full:
                    pass
            project_meta_sent = True

            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate_image_file(
                req.item_paths.img_path, in_ann)
            out_dataset = project.datasets.get(req.ds_name)
            out_dataset.add_item_file(req.item_name,
                                      req.item_paths.img_path,
                                      ann=ann,
                                      _validate_img=False,
                                      _use_hardlink=True)
            progress_queue.put(1)
Example No. 8
    def load_annotation(self, fpath):
        # Will not resize figures: resize the ground truth instead.
        return Annotation.load_json_file(fpath, self._project_meta)
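For reference, a minimal standalone sketch (assumption: the SDK is importable as supervisely_lib) of the single call that every example above ultimately makes: loading one annotation JSON together with its project meta.

import supervisely_lib as sly

project = sly.Project('/data/my_project', sly.OpenMode.READ)   # placeholder path
dataset = next(iter(project.datasets))                          # first dataset in the project
item_name = next(iter(dataset))                                 # first item in that dataset
ann = sly.Annotation.load_json_file(dataset.get_ann_path(item_name), project.meta)
print(ann.img_size, len(ann.labels), len(ann.img_tags))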