def populate_inference_requests_queue(in_project, inference_processes,
                                      request_queue):
    """Feed one InferenceRequest per project item into request_queue.

    Returns True when every item has been enqueued, False if any of the
    inference worker processes died while the queue was full (in which
    case we bail out early to avoid deadlocking on a queue nobody drains).
    """
    for dataset in in_project:
        for item_name in dataset:
            logger.trace('Will process image',
                         extra={
                             'dataset_name': dataset.name,
                             'image_name': item_name
                         })
            item_paths = dataset.get_item_paths(item_name)
            request = InferenceRequest(ds_name=dataset.name,
                                       item_name=item_name,
                                       item_paths=item_paths)
            enqueued = False
            while not enqueued:
                try:
                    # Bounded timeout so we never block forever on a queue
                    # that no surviving worker will ever drain.
                    request_queue.put(request, timeout=0.1)
                    enqueued = True
                except queue.Full:
                    # A dead worker means the queue may never empty again;
                    # stop producing and report failure.
                    if any(not process.is_alive() for process in inference_processes):
                        return False
    return True
Example #2
0
    def run_inference(self):
        """Run the configured inference mode over every item of the input
        project and write the annotated results into a fresh output project
        under TaskPaths.RESULTS_DIR, reporting progress along the way."""
        mode = InferenceModeFactory.create(
            self._inference_mode_config, self._in_project.meta,
            self._single_image_inference)

        result_project = Project(
            os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name),
            OpenMode.CREATE)
        result_project.set_meta(mode.out_meta)

        progress = Progress('Model applying: ',
                            self._in_project.total_items)
        for dataset in self._in_project:
            result_dataset = result_project.create_dataset(dataset.name)
            for item_name in dataset:
                item_paths = dataset.get_item_paths(item_name)
                image = sly_image.read(item_paths.img_path)
                # Load the annotation against the OUTPUT meta so it is already
                # in the context of the output project (added classes etc.).
                annotation = Annotation.load_json_file(item_paths.ann_path,
                                                       mode.out_meta)
                logger.trace('Will process image',
                             extra={
                                 'dataset_name': dataset.name,
                                 'image_name': item_name
                             })
                result_annotation = mode.infer_annotate(image, annotation)
                result_dataset.add_item_file(item_name,
                                             item_paths.img_path,
                                             ann=result_annotation)

                progress.iter_done_report()

        report_inference_finished()
Example #3
0
def populate_inference_requests_queue(in_project, inference_processes,
                                      request_queue):
    """Feed one InferenceRequest per project item into request_queue.

    Returns True when every item has been enqueued, False if any inference
    worker process died while the queue was full (early exit to avoid a
    deadlock on a queue that nobody will ever drain).

    BUG FIX: the previous polling implementation checked request_queue.full()
    and then called a blocking request_queue.put(req). Queue.full() is only
    advisory — between the check and the put the queue can fill up, and the
    unbounded put then blocks forever even if all workers subsequently die,
    which is exactly the deadlock this code was trying to prevent. Using a
    bounded-timeout put and retrying on queue.Full closes that race.
    """
    import queue  # local import: queue.Full is needed for the timeout retry

    for in_dataset in in_project:
        for in_item_name in in_dataset:
            logger.trace('Will process image',
                         extra={
                             'dataset_name': in_dataset.name,
                             'image_name': in_item_name
                         })
            in_item_paths = in_dataset.get_item_paths(in_item_name)
            req = InferenceRequest(ds_name=in_dataset.name,
                                   item_name=in_item_name,
                                   item_paths=in_item_paths)
            while True:
                try:
                    # Finite timeout so a full queue never blocks us forever.
                    request_queue.put(req, timeout=0.1)
                    break
                except queue.Full:
                    # If any inference process has died, the queue may never
                    # be drained again; stop producing and report failure.
                    if not all(p.is_alive() for p in inference_processes):
                        return False
    return True
Example #4
0
    def _process_sample(self, x, target, output):
        """Render a 2x2 debug panel for one sample and save it as a PNG.

        Layout: the (min-max normalized, RGB->BGR) input image in the top
        row, the scaled ground-truth target bottom-left and the argmax of
        the network output bottom-right.

        BUG FIX: the normalization previously divided by (max - min) without
        a guard, so a constant-valued input patch caused a division by zero
        (NaN/inf pixels, or a ZeroDivisionError for plain scalars).
        """
        timg = TableImg((x.shape[1], x.shape[0]), (2, 2))

        lo, hi = np.min(x), np.max(x)
        span = hi - lo
        if span == 0:
            # Constant image: avoid division by zero; render it as all-black.
            span = 1
        x = (x - lo) / span * 255
        x = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)
        timg.paste_item(x, 0, 0)
        timg.paste_item(x, 0, 1)

        sq_target = np.clip(target * self.target_multi, 0, 255)
        timg.paste_item(sq_target, 1, 0)

        # Collapse per-class scores to a class-index map, keep a channel dim.
        sq_output = np.expand_dims(np.argmax(output, axis=2), axis=2)
        sq_output = np.clip(sq_output * self.target_multi, 0, 255)
        timg.paste_item(sq_output, 1, 1)

        out_img = timg.img
        if self.out_scale != 1:
            out_img = sly.image.resize_inter_nearest(out_img,
                                                     frow=self.out_scale,
                                                     fcol=self.out_scale)

        ofpath = os.path.join(self.odir, '{:08}.png'.format(self._next_idx))
        cv2.imwrite(ofpath, out_img)
        self._next_idx += 1
        logger.trace('Saved debug patch: {}'.format(ofpath))
Example #5
0
    def run_inference(self):
        """Apply the inference feeder to every sample of the input project
        (legacy filesystem layout), writing packed result annotations — and,
        when debug_copy_images is set, the images themselves — into the
        results directory."""
        out_project = copy(self.in_project_fs)
        out_project.root_path = self.helper.paths.results_dir
        out_project.make_dirs()

        # Persist the output meta next to the output project.
        self.inf_feeder.out_meta.to_dir(out_project.project_path)

        total_images = out_project.pr_structure.image_cnt
        progress = sly.progress_counter_inference(cnt_imgs=total_images)

        for sample in self.in_project_fs:
            logger.trace('Will process image',
                         extra={'dataset_name': sample.ds_name, 'image_name': sample.image_name})
            packed = sly.json_load(sample.ann_path)
            annotation = sly.Annotation.from_packed(packed, self.helper.in_project_meta)

            # cv2 reads BGR; reverse the channel axis to get RGB.
            image = cv2.imread(sample.img_path)[:, :, ::-1]
            result_annotation = self.inf_feeder.feed(image, annotation, self._infer_on_img_legacy)

            out_ann_path = out_project.ann_path(sample.ds_name, sample.image_name)
            sly.json_dump(result_annotation.pack(), out_ann_path)

            if self.debug_copy_images:
                out_img_path = out_project.img_path(sample.ds_name, sample.image_name)
                sly.ensure_base_path(out_img_path)
                shutil.copy(sample.img_path, out_img_path)

            progress.iter_done_report()

        sly.report_inference_finished()
Example #6
0
def main():
    """Entry point for the DTL task: verify the computation graph, run every
    input project's samples through the net, and report the result count.

    Fixes: corrected the typo in the skip-warning log message
    ('occured' -> 'occurred') and removed dead commented-out code.
    """
    sly.task_verification(check_in_graph)

    logger.info('DTL started')
    helper = sly.DtlHelper()
    net = Net(helper.graph, helper.in_project_metas, helper.paths.results_dir)
    helper.save_res_meta(net.get_result_project_meta())

    results_counter = 0
    for pr_name, pr_dir in helper.in_project_dirs.items():
        root_path, project_name = sly.ProjectFS.split_dir_project(pr_dir)
        project_fs = sly.ProjectFS.from_disk(root_path, project_name, by_annotations=True)
        progress = sly.progress_counter_dtl(pr_name, project_fs.image_cnt)
        for sample in project_fs:
            try:
                img_desc = sly.ImageDescriptor(sample)
                ann = sly.json_load(sample.ann_path)
                data_el = (img_desc, ann)
                export_output_generator = net.start(data_el)
                for res_export in export_output_generator:
                    logger.trace("image processed", extra={'img_name': res_export[0][0].get_img_name()})
                    results_counter += 1
            except Exception:
                # Best-effort processing: log the failure with context and
                # continue with the next image instead of aborting the task.
                ex = {
                    'project_name': sample.project_name,
                    'ds_name': sample.ds_name,
                    'image_name': sample.image_name
                }
                logger.warn('Image was skipped because some error occurred', exc_info=True, extra=ex)
            progress.iter_done_report()

    logger.info('DTL finished', extra={'event_type': EventType.DTL_APPLIED, 'new_proj_size': results_counter})