Example #1
    def run_inference(self):
        inference_mode = InferenceModeFactory.create(
            self._inference_mode_config, self._in_project.meta,
            self._single_image_inference)
        out_project = Project(
            os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name),
            OpenMode.CREATE)
        out_project.set_meta(inference_mode.out_meta)

        progress_bar = Progress('Model applying: ',
                                self._in_project.total_items)
        for in_dataset in self._in_project:
            out_dataset = out_project.create_dataset(in_dataset.name)
            for in_item_name in in_dataset:
                in_item_paths = in_dataset.get_item_paths(in_item_name)
                in_img = sly_image.read(in_item_paths.img_path)
                # Load the annotation against the output project meta so that
                # it is already in the context of the output project (with the
                # added object classes etc.).
                in_ann = Annotation.load_json_file(in_item_paths.ann_path,
                                                   inference_mode.out_meta)
                logger.trace('Will process image',
                             extra={
                                 'dataset_name': in_dataset.name,
                                 'image_name': in_item_name
                             })
                inference_annotation = inference_mode.infer_annotate(
                    in_img, in_ann)
                out_dataset.add_item_file(in_item_name,
                                          in_item_paths.img_path,
                                          ann=inference_annotation)

                progress_bar.iter_done_report()

        report_inference_finished()
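
This first variant applies the model sequentially in a single process: it builds the inference mode, creates the output project, and annotates items one by one. Read in isolation, the method assumes the legacy supervisely_lib SDK; a minimal sketch of the imports it relies on (the exact module paths are assumptions and may differ between SDK versions):

    import os

    # Assumed module layout of the legacy supervisely_lib SDK; adjust to
    # your installed version.
    from supervisely_lib import logger
    from supervisely_lib.annotation.annotation import Annotation
    from supervisely_lib.imaging import image as sly_image
    from supervisely_lib.nn.hosted.inference_modes import InferenceModeFactory
    from supervisely_lib.project.project import Project, OpenMode
    from supervisely_lib.task.paths import TaskPaths
    from supervisely_lib.task.progress import Progress, report_inference_finished
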
Example #2
    def run_inference(self):
        progress_report_thread = Thread(target=progress_report_thread_fn,
                                        args=(self._in_project,
                                              self._progress_report_queue),
                                        daemon=True)
        progress_report_thread.start()

        feed_status = populate_inference_requests_queue(
            self._in_project, self._inference_processes,
            self._inference_request_queue)
        # One None sentinel per worker so that every inference process can
        # exit its request loop.
        for _ in self._inference_processes:
            self._inference_request_queue.put(None)

        # The workers publish the output project meta as JSON on a dedicated
        # queue; apply it to the output project.
        out_meta_json = self._result_meta_queue.get()
        self._out_project.set_meta(ProjectMeta.from_json(out_meta_json))

        for p in self._inference_processes:
            p.join()

        if not feed_status or not all(p.exitcode == 0
                                      for p in self._inference_processes):
            raise RuntimeError(
                'Feeding the requests queue failed or one of the inference '
                'processes encountered an error.')

        # Stop the progress reporter with its own None sentinel, then wait
        # for it to flush.
        self._progress_report_queue.put(None)
        progress_report_thread.join()
        report_inference_finished()
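
This second variant fans the work out to a pool of inference worker processes (self._inference_processes) and moves progress reporting onto a dedicated thread, using None as the shutdown sentinel throughout. The snippet additionally needs Thread from the standard threading module and ProjectMeta from the SDK. The helper functions it calls are not shown on this page; a hypothetical sketch of what they might look like, with bodies that are assumptions inferred from how the snippet uses them (Progress as in the imports sketched after Example #1):

    from threading import Thread

    def progress_report_thread_fn(in_project, progress_report_queue):
        # Hypothetical: report one progress tick per queued event until the
        # None sentinel arrives.
        progress_bar = Progress('Model applying: ', in_project.total_items)
        while True:
            event = progress_report_queue.get()
            if event is None:
                break
            progress_bar.iter_done_report()

    def populate_inference_requests_queue(in_project, inference_processes,
                                          request_queue):
        # Hypothetical: enqueue every (dataset, item) pair for the workers;
        # report failure if all workers die before feeding completes.
        for in_dataset in in_project:
            for in_item_name in in_dataset:
                if not any(p.is_alive() for p in inference_processes):
                    return False
                request_queue.put((in_dataset.name, in_item_name))
        return True
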
Example #3
    def run_inference(self):
        result_writer_thread = Thread(target=result_writer_thread_fn,
                                      args=(self._in_project,
                                            self._inference_result_queue),
                                      daemon=True)
        result_writer_thread.start()

        feed_status = populate_inference_requests_queue(
            self._in_project, self._inference_processes,
            self._inference_request_queue)
        # Same sentinel pattern as above: one None per worker, then wait for
        # all inference processes to finish.
        for _ in self._inference_processes:
            self._inference_request_queue.put(None)
        for p in self._inference_processes:
            p.join()

        if not feed_status or not all(p.exitcode == 0
                                      for p in self._inference_processes):
            raise RuntimeError(
                'Feeding the requests queue failed or one of the inference '
                'processes encountered an error.')

        # A final None sentinel tells the writer thread that no more results
        # are coming.
        self._inference_result_queue.put(None)
        result_writer_thread.join()
        report_inference_finished()
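
This third variant also moves writing of the results onto a dedicated thread, so the main thread only feeds requests and supervises shutdown. A hypothetical sketch of result_writer_thread_fn under the same assumptions; the layout of the tuples on the result queue is a guess, and handling of the output project meta is omitted here (the real helper would also need to apply the meta coming back from the workers):

    def result_writer_thread_fn(in_project, inference_result_queue):
        # Hypothetical: mirror the input project structure in the results
        # directory and write annotated items until the None sentinel arrives.
        out_project = Project(
            os.path.join(TaskPaths.RESULTS_DIR, in_project.name),
            OpenMode.CREATE)
        out_datasets = {}
        while True:
            result = inference_result_queue.get()
            if result is None:
                break
            dataset_name, item_name, img_path, ann = result  # assumed layout
            if dataset_name not in out_datasets:
                out_datasets[dataset_name] = out_project.create_dataset(
                    dataset_name)
            out_datasets[dataset_name].add_item_file(item_name, img_path,
                                                     ann=ann)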