def single_inference_process_fn(inference_initializer, inference_mode_config, in_project_meta_json, request_queue,
                                response_queue):
    """Worker loop for a separate inference process.

    Builds its own model via inference_initializer, serves requests taken from
    request_queue and puts InferenceResponse results on response_queue.
    A None request signals the process to finish.
    """
    model = inference_initializer()
    mode = InferenceModeFactory.create(
        inference_mode_config, ProjectMeta.from_json(in_project_meta_json), model)
    out_meta_json = mode.out_meta.to_json()

    while True:
        request = request_queue.get()
        if request is None:
            # Shutdown sentinel; acknowledge it before exiting, same as regular requests.
            request_queue.task_done()
            break
        img = sly_image.read(request.item_paths.img_path)
        # Load against the output meta so the annotation is already in the output context.
        ann = Annotation.load_json_file(request.item_paths.ann_path, mode.out_meta)
        result_ann = mode.infer_annotate(img, ann)
        response_queue.put(InferenceResponse(ds_name=request.ds_name,
                                             item_name=request.item_name,
                                             item_paths=request.item_paths,
                                             ann_json=result_ann.to_json(),
                                             meta_json=out_meta_json))
        request_queue.task_done()
def run_inference(self):
    """Apply the model to every item of the input project.

    Writes a new project under TaskPaths.RESULTS_DIR containing the inferred
    annotations, reporting progress per item and a final finished event.
    """
    inference_mode = InferenceModeFactory.create(
        self._inference_mode_config, self._in_project.meta, self._single_image_inference)

    out_project = Project(os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name), OpenMode.CREATE)
    out_project.set_meta(inference_mode.out_meta)

    progress = Progress('Model applying: ', self._in_project.total_items)
    for src_dataset in self._in_project:
        dst_dataset = out_project.create_dataset(src_dataset.name)
        for item_name in src_dataset:
            item_paths = src_dataset.get_item_paths(item_name)
            img = sly_image.read(item_paths.img_path)
            # Use output project meta so that we get an annotation that is already in the
            # context of the output project (with added object classes etc).
            ann = Annotation.load_json_file(item_paths.ann_path, inference_mode.out_meta)
            logger.trace('Will process image',
                         extra={'dataset_name': src_dataset.name, 'image_name': item_name})
            result_ann = inference_mode.infer_annotate(img, ann)
            dst_dataset.add_item_file(item_name, item_paths.img_path, ann=result_ann)
            progress.iter_done_report()
    report_inference_finished()
def save_project_as_pascal_voc_detection(save_path, project: Project):
    """Export a project to Pascal VOC detection layout under save_path.

    For each dataset a directory tree (JPEGImages, Annotations, ImageSets/Layout)
    is created; images are copied or re-encoded, labels are written as VOC XML
    bounding boxes, and per-tag image lists are saved.
    """
    # Create root pascal 'datasets' folders
    for dataset in project.datasets:
        pascal_dataset_path = os.path.join(save_path, dataset.name)
        pascal_dataset_relative_path = os.path.relpath(pascal_dataset_path, save_path)
        images_dir = os.path.join(pascal_dataset_path, 'JPEGImages')
        anns_dir = os.path.join(pascal_dataset_path, 'Annotations')
        lists_dir = os.path.join(pascal_dataset_path, 'ImageSets/Layout')
        fs_utils.mkdir(pascal_dataset_path)
        for subdir in ['ImageSets',  # Train list, Val list, etc.
                       'ImageSets/Layout',
                       'Annotations',
                       'JPEGImages']:
            fs_utils.mkdir(os.path.join(pascal_dataset_path, subdir))

        samples_by_tags = defaultdict(list)  # TRAIN: [img_1, img2, ..]

        for item_name in dataset:
            img_path, ann_path = dataset.get_item_paths(item_name)
            no_ext_name = fs_utils.get_file_name(item_name)
            pascal_img_path = os.path.join(images_dir, no_ext_name + OUT_IMG_EXT)
            pascal_ann_path = os.path.join(anns_dir, no_ext_name + XML_EXT)

            # Copy as-is when already in the output format; otherwise re-encode.
            if item_name.endswith(OUT_IMG_EXT):
                fs_utils.copy_file(img_path, pascal_img_path)
            else:
                img = image_utils.read(img_path)
                image_utils.write(pascal_img_path, img)

            ann = Annotation.load_json_file(ann_path, project_meta=project.meta)

            # Read tags for images lists generation
            for tag in ann.img_tags:
                samples_by_tags[tag.name].append((no_ext_name, len(ann.labels)))

            # NOTE(review): the writer's `path` is the dataset directory relative to
            # save_path, not the image file path — confirm this matches the expected
            # VOC <path>/<folder> fields for downstream consumers.
            writer = pascal_voc_writer.Writer(path=pascal_dataset_relative_path,
                                              width=ann.img_size[1],
                                              height=ann.img_size[0])
            # Every label is exported as its bounding box (detection-style export).
            for label in ann.labels:
                obj_class = label.obj_class
                rect: Rectangle = label.geometry.to_bbox()
                writer.addObject(name=obj_class.name,
                                 xmin=rect.left,
                                 ymin=rect.top,
                                 xmax=rect.right,
                                 ymax=rect.bottom)
            writer.save(pascal_ann_path)

        save_images_lists(lists_dir, samples_by_tags)
def _get_empty_annotaion(self, item_name):
    """Create an empty annotation sized to the given item's image.

    Propagates an exception if the item's image cannot be read
    (e.g. the item is not present in the project).

    :param item_name: str
    :return: Annotation class object
    """
    img = sly_image.read(self.get_img_path(item_name))
    return self.annotation_class(img.shape[:2])
def from_img_path(cls, img_path):
    """Read the image stored at img_path and build an instance from its size.

    :param img_path: the path to the input image
    :return: instance constructed from the image's (height, width)
    """
    height_width = sly_image.read(img_path).shape[:2]
    return cls(height_width)
def _set_ann_by_type(self, item_name, ann):
    """Attach an annotation to item_name, dispatching on the annotation's type.

    :param item_name: name of the item the annotation belongs to.
    :param ann: one of
        - None: an empty Annotation sized to the item's image is created;
        - Annotation: stored as-is;
        - str: interpreted as a path to an annotation file;
        - dict: interpreted as annotation JSON data.
    :raises TypeError: for any other type.
    """
    if ann is None:
        img_size = sly_image.read(self.get_img_path(item_name)).shape[:2]
        self.set_ann(item_name, Annotation(img_size))
    # isinstance (instead of exact `type(...) is`) also accepts subclasses,
    # which previously raised TypeError despite being valid annotations.
    elif isinstance(ann, Annotation):
        self.set_ann(item_name, ann)
    elif isinstance(ann, str):
        self.set_ann_file(item_name, ann)
    elif isinstance(ann, dict):
        self.set_ann_dict(item_name, ann)
    else:
        raise TypeError("Unsupported type {!r} for ann argument".format(type(ann)))
def _set_ann_by_type(self, item_name, ann):
    """Attach an annotation to item_name, dispatching on the annotation's type.

    :param item_name: name of the item the annotation belongs to.
    :param ann: one of
        - None: an empty Annotation sized to the item's image is created;
        - Annotation: stored as-is;
        - str: interpreted as a path to an annotation file;
        - dict: interpreted as annotation JSON data.
    :raises TypeError: for any other type.
    """
    if ann is None:
        img_path = self.deprecated_make_img_path(item_name, self._items_exts[item_name])
        img_size = image.read(img_path).shape[:2]
        self.set_ann(item_name, Annotation(img_size))
    # isinstance (instead of exact `type(...) is`) also accepts subclasses,
    # which previously raised TypeError despite being valid annotations.
    elif isinstance(ann, Annotation):
        self.set_ann(item_name, ann)
    elif isinstance(ann, str):
        self.set_ann_file(item_name, ann)
    elif isinstance(ann, dict):
        self.set_ann_dict(item_name, ann)
    else:
        raise TypeError("Unsupported type {!r} for ann argument".format(type(ann)))
def _add_ann_by_type(self, item_name, ann):
    """Register the annotation file name for item_name and attach the annotation.

    :param item_name: name of the item the annotation belongs to.
    :param ann: one of
        - None: an empty Annotation sized to the item's image is created;
        - Annotation: stored as-is;
        - str: interpreted as a path to an annotation file;
        - dict: interpreted as annotation JSON data.
    :raises TypeError: for any other type.
    """
    # This is a new-style annotation name, so if there was no image with this name yet,
    # there should not have been an annotation either.
    self._item_to_ann[item_name] = item_name + ANN_EXT
    if ann is None:
        img_size = sly_image.read(self.get_img_path(item_name)).shape[:2]
        self.set_ann(item_name, Annotation(img_size))
    # isinstance (instead of exact `type(...) is`) also accepts subclasses,
    # which previously raised TypeError despite being valid annotations.
    elif isinstance(ann, Annotation):
        self.set_ann(item_name, ann)
    elif isinstance(ann, str):
        self.set_ann_file(item_name, ann)
    elif isinstance(ann, dict):
        self.set_ann_dict(item_name, ann)
    else:
        raise TypeError("Unsupported type {!r} for ann argument".format(type(ann)))
def _get_sample_impl(self, img_fpath, ann_fpath):
    """Load one (image, ground-truth) pair from disk.

    NOTE(review): the ground truth is built from the image's *original* shape and
    only the image is resized to self._out_size afterwards — confirm make_gt
    already produces output at the target size.
    """
    image = sly_image.read(img_fpath)
    annotation = self.load_annotation(ann_fpath)
    ground_truth = self.make_gt(image.shape, annotation)
    resized_image = sly_image.resize(image, self._out_size)
    return resized_image, ground_truth
def read_image_fn(img_path_bytes):
    """Decode a UTF-8 byte-string path and read the image stored there."""
    path = img_path_bytes.decode('utf-8')
    return sly_image.read(path)
def from_img_path(cls, img_path):
    """Construct an instance from the (height, width) of the image at img_path."""
    height_width = sly_image.read(img_path).shape[:2]
    return cls(height_width)
def _get_empty_annotaion(self, item_name):
    """Return an empty annotation matching the size of the item's image."""
    img = sly_image.read(self.get_img_path(item_name))
    return self.annotation_class(img.shape[:2])