Example #1
    def run_inference(self):
        inference_mode = InferenceModeFactory.create(
            self._inference_mode_config, self._in_project.meta,
            self._single_image_inference)
        out_project = Project(
            os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name),
            OpenMode.CREATE)
        out_project.set_meta(inference_mode.out_meta)

        progress_bar = Progress('Model applying: ',
                                self._in_project.total_items)
        for in_dataset in self._in_project:
            out_dataset = out_project.create_dataset(in_dataset.name)
            for in_item_name in in_dataset:
                # Use the output project meta so that we get an annotation that is already in the
                # context of the output project (with added object classes etc.).
                in_item_paths = in_dataset.get_item_paths(in_item_name)
                in_img = sly_image.read(in_item_paths.img_path)
                in_ann = Annotation.load_json_file(in_item_paths.ann_path,
                                                   inference_mode.out_meta)
                logger.trace('Will process image',
                             extra={
                                 'dataset_name': in_dataset.name,
                                 'image_name': in_item_name
                             })
                inference_annotation = inference_mode.infer_annotate(
                    in_img, in_ann)
                out_dataset.add_item_file(in_item_name,
                                          in_item_paths.img_path,
                                          ann=inference_annotation)

                progress_bar.iter_done_report()

        report_inference_finished()
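
The loop above relies on just two members of the inference mode returned by InferenceModeFactory.create(): out_meta and infer_annotate(). A minimal sketch of that interface, using a hypothetical identity mode (this class is illustrative only, not part of the SDK):

class IdentityInferenceMode:
    """Illustrative stub; a real mode comes from InferenceModeFactory.create()."""

    def __init__(self, out_meta):
        # Output project meta, including any object classes the mode adds.
        self.out_meta = out_meta

    def infer_annotate(self, img, ann):
        # A real mode would run the model on `img` and merge the predicted
        # labels into `ann`; this stub returns the annotation unchanged.
        return ann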
Example #2
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                response_queue):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)
    out_meta_json = inference_mode.out_meta.to_json()

    req = ''  # non-None placeholder so the loop keeps running until a None arrives
    while req is not None:
        req = request_queue.get()
        if req is not None:
            in_img = sly_image.read(req.item_paths.img_path)
            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate(in_img, in_ann)
            resp = InferenceResponse(ds_name=req.ds_name,
                                     item_name=req.item_name,
                                     item_paths=req.item_paths,
                                     ann_json=ann.to_json(),
                                     meta_json=out_meta_json)
            response_queue.put(resp)
        request_queue.task_done()  # also acknowledges the final None sentinel
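
A sketch of the parent-process side of this protocol, assuming `requests` is an iterable of objects carrying the ds_name, item_name and item_paths attributes the worker reads (all surrounding variable names here are placeholders, not part of the example):

import multiprocessing

request_queue = multiprocessing.JoinableQueue()
response_queue = multiprocessing.Queue()
worker = multiprocessing.Process(
    target=single_inference_process_fn,
    args=(inference_initializer, inference_mode_config,
          in_project_meta.to_json(), request_queue, response_queue))
worker.start()

for request in requests:
    request_queue.put(request)
request_queue.put(None)  # None sentinel: tells the worker to finish

responses = [response_queue.get() for _ in requests]  # one reply per request
request_queue.join()  # every request (and the sentinel) was task_done()
worker.join()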
Example #3
def single_inference_process_fn(inference_initializer, inference_mode_config,
                                in_project_meta_json, request_queue,
                                result_meta_queue, progress_queue, project):
    """Loads a separate model, processes requests from request_queue, results go to result_queue.

    None request signals the process to finish.
    """
    single_image_inference = inference_initializer()
    inference_mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json),
        single_image_inference)

    project_meta_sent = False
    req = ''
    while req is not None:
        req = request_queue.get()
        if req is not None:
            # Send the resulting project meta to the parent process to make sure
            # the meta JSON is only written once. queue.Full (from the stdlib
            # queue module) means it has already been published.
            if not project_meta_sent:
                try:
                    result_meta_queue.put(inference_mode.out_meta.to_json(),
                                          block=False)
                except queue.Full:
                    pass
            project_meta_sent = True

            in_ann = Annotation.load_json_file(req.item_paths.ann_path,
                                               inference_mode.out_meta)
            ann = inference_mode.infer_annotate_image_file(
                req.item_paths.img_path, in_ann)
            out_dataset = project.datasets.get(req.ds_name)
            out_dataset.add_item_file(req.item_name,
                                      req.item_paths.img_path,
                                      ann=ann,
                                      _validate_img=False,
                                      _use_hardlink=True)
            progress_queue.put(1)
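
This variant is built to run as one of several identical workers sharing the queues: result_meta_queue is expected to be bounded, so only the first non-blocking put() succeeds and the meta is recorded exactly once, while progress_queue carries one tick per finished item. A sketch of the parent side under those assumptions (num_workers, requests, and out_project with its datasets already created are placeholders):

import multiprocessing

request_queue = multiprocessing.Queue()
result_meta_queue = multiprocessing.Queue(maxsize=1)  # first put() wins
progress_queue = multiprocessing.Queue()

workers = [
    multiprocessing.Process(
        target=single_inference_process_fn,
        args=(inference_initializer, inference_mode_config,
              in_project_meta.to_json(), request_queue,
              result_meta_queue, progress_queue, out_project))
    for _ in range(num_workers)]
for w in workers:
    w.start()

for request in requests:
    request_queue.put(request)
for _ in workers:
    request_queue.put(None)  # one None sentinel per worker

out_meta_json = result_meta_queue.get()  # published exactly once
for _ in requests:
    progress_queue.get()  # one progress tick per processed item
for w in workers:
    w.join()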
Example #4
    def _make_inference_mode(self, msg_inference_mode_config, in_project_meta):
        inference_mode_config = get_effective_inference_mode_config(
            msg_inference_mode_config,
            deepcopy(self._default_inference_mode_config))
        return InferenceModeFactory.create(inference_mode_config,
                                           in_project_meta, self.model_applier)
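
get_effective_inference_mode_config() overlays the per-request config on a deep copy of the defaults. A plausible sketch of such a merge, assuming a recursive dict overlay (merge_configs is a hypothetical stand-in, not the SDK helper):

from copy import deepcopy

def merge_configs(overrides, defaults):
    # Hypothetical helper: values from `overrides` win; nested dicts are
    # merged recursively instead of being replaced wholesale.
    result = deepcopy(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge_configs(value, result[key])
        else:
            result[key] = value
    return result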