def get_ann(img_path, inst_path):
    """Build an annotation with 'background' and 'object' bitmap labels.

    Reads the instance mask at ``inst_path``, thresholds it to a binary
    foreground mask, and treats zero-valued gray pixels as background.
    Missing classes are registered in the global ``classes_dict``.

    :param img_path: path to the source image (defines annotation size).
    :param inst_path: path to the instance mask image (BGR-readable).
    :return: sly.Annotation with up to two bitmap labels.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    instance_img = sly.image.read(inst_path)
    img_gray = cv2.cvtColor(instance_img, cv2.COLOR_BGR2GRAY)
    _, mask_foreground = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
    mask_background = (img_gray == 0)
    class_name = 'background'
    color = [1, 1, 1]
    bitmap = sly.Bitmap(data=mask_background)
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
        classes_dict = classes_dict.add(obj_class)
    ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    # Fix: np.bool was deprecated and removed in NumPy 1.24 — use builtin bool.
    mask_foreground = mask_foreground.astype(bool)
    if np.any(mask_foreground):
        class_name = 'object'
        color = [255, 255, 255]
        bitmap = sly.Bitmap(data=mask_foreground)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
            classes_dict = classes_dict.add(obj_class)
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def process_meta(input_meta):
    """Build the evaluation-output meta from the input project meta.

    Keeps only classes that participate in the global ``classes_mapping``
    (as GT keys or prediction values), then, per GT class, adds bitmap
    classes for false positives / false negatives and an IoU image tag.
    """
    meta = sly.ProjectMeta(obj_classes=None,
                           img_tag_metas=input_meta.img_tag_metas,
                           objtag_metas=input_meta.obj_tags)
    # Carry over only the classes referenced by the GT<->prediction mapping.
    for cls in input_meta.obj_classes:
        if cls.name in classes_mapping or cls.name in classes_mapping.values():
            meta = meta.add_obj_class(cls)
    # Derived per-GT-class artifacts: FP/FN bitmap classes and an IoU tag.
    for gt_class in classes_mapping:
        meta = meta.add_obj_class(sly.ObjClass(make_false_positive_name(gt_class), sly.Bitmap))
        meta = meta.add_obj_class(sly.ObjClass(make_false_negative_name(gt_class), sly.Bitmap))
        meta = meta.add_img_tag_meta(sly.TagMeta(make_iou_tag_name(gt_class), sly.TagValueType.ANY_NUMBER))
    return meta
def set_project_meta(api, project_id, state):
    """Reset the project's meta to exactly two classes taken from UI state.

    :param api: Supervisely API client.
    :param project_id: id of the target project.
    :param state: widget state dict holding class names/shapes/colors.
    :return: tuple of the created (foreground, style) ObjClass objects.
    """
    fg_class = sly.ObjClass(state[const.FG_NAME],
                            GET_GEOMETRY_FROM_STR(state[const.FG_SHAPE]),
                            color=sly.color.hex2rgb(state[const.FG_COLOR]))
    st_class = sly.ObjClass(state[const.ST_NAME],
                            GET_GEOMETRY_FROM_STR(state[const.ST_SHAPE]),
                            color=sly.color.hex2rgb(state[const.ST_COLOR]))
    # Upload an empty meta first to clear previous labels and classes.
    api.project.update_meta(project_id, sly.ProjectMeta().to_json())
    meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([fg_class, st_class]))
    api.project.update_meta(project_id, meta.to_json())
    return fg_class, st_class
def test_from_supervisely():
    """Round-trip a tiny Supervisely project through hub and back.

    Builds a one-image project, annotates it first with a rectangle and
    then with a polygon, and each time converts project -> hub dataset ->
    project. NOTE(review): `hub` is presumably the Activeloop Hub package
    imported at module level — confirm; the round-trip results are not
    asserted here, so this test only checks the conversions don't raise.
    """
    import supervisely_lib as sly
    data_path = "./data/test_supervisely/from_to"
    # Start from a clean directory so stale project files can't interfere.
    if os.path.exists(data_path):
        shutil.rmtree(data_path)
    project_name = "pixel_project"
    project_path = os.path.join(data_path, project_name)
    project = sly.Project(project_path, sly.OpenMode.CREATE)
    init_meta = project.meta  # kept so the meta can be restored between cases
    project.meta._project_type = "images"
    project_ds = project.create_dataset(project_name)
    img = np.ones((30, 30, 3))
    project_ds.add_item_np("pixel.jpeg", img)
    item_path, item_ann_path = project_ds.get_item_paths("pixel.jpeg")
    ann = sly.Annotation.load_json_file(item_ann_path, project.meta)
    # --- Case 1: rectangle annotation ---
    bbox_class = sly.ObjClass(name="_bbox", geometry_type=sly.Rectangle)
    meta_with_bboxes = project.meta.add_obj_classes([bbox_class])
    bbox_label = sly.Label(
        geometry=sly.Rectangle(0, 0, 10, 10),
        obj_class=meta_with_bboxes.obj_classes.get("_bbox"),
    )
    ann_with_bboxes = ann.add_labels([bbox_label])
    project_ds.set_ann("pixel.jpeg", ann_with_bboxes)
    project.set_meta(meta_with_bboxes)
    trans = hub.Dataset.from_supervisely(project)
    dataset = trans.store(os.path.join(data_path, "pixel_dataset_bbox"))
    project_back = dataset.to_supervisely(
        os.path.join(data_path, "pixel_project_bbox_back"))
    # Restore the original meta before the polygon case.
    project.set_meta(init_meta)
    # --- Case 2: polygon annotation ---
    poly_class = sly.ObjClass(name="_poly", geometry_type=sly.Polygon)
    meta_with_poly = project.meta.add_obj_classes([poly_class])
    points = [[0, 0], [0, 10], [10, 0], [10, 10]]
    point_loc_points = [
        sly.geometry.point_location.PointLocation(*point)
        for point in points
    ]
    poly_label = sly.Label(
        geometry=sly.Polygon(exterior=point_loc_points, interior=[]),
        obj_class=meta_with_poly.obj_classes.get("_poly"),
    )
    ann_with_polys = ann.add_labels([poly_label])
    project_ds.set_ann("pixel.jpeg", ann_with_polys)
    project.set_meta(meta_with_poly)
    trans = hub.Dataset.from_supervisely(project)
    dataset = trans.store(os.path.join(data_path, "pixel_dataset_poly"))
    project_back = dataset.to_supervisely(
        os.path.join(data_path, "pixel_project_poly_back"))
def _generate_annotation(self, src_img_path, inst_path):
    """Create an annotation from a uint16 instance image.

    Each distinct pixel value encodes one object; the class index lives in
    the high byte (``value // 256``). Newly seen classes are registered in
    ``self.obj_classes``.
    """
    ann = sly.Annotation.from_img_path(src_img_path)
    if not os.path.isfile(inst_path):
        return ann
    instance_img = self._read_img_unchanged(inst_path)
    for col in get_col2coord(instance_img):
        # High byte of the pixel value selects the class (dirty hack kept from original).
        class_name = self.cls_names[int(col // 256)]
        mask = instance_img == col  # exact match for 1d uint16
        obj_class = sly.ObjClass(name=class_name,
                                 geometry_type=sly.Bitmap,
                                 color=self.cls2col.get(class_name, sly.color.random_rgb()))
        if not self.obj_classes.has_key(class_name):
            self.obj_classes = self.obj_classes.add(obj_class)
        ann = ann.add_label(sly.Label(sly.Bitmap(data=mask), obj_class))
        instance_img[mask] = 0  # zero consumed pixels to detect leftovers below
    if np.sum(instance_img) > 0:
        sly.logger.warn(
            'Not all objects or classes are captured from source segmentation.',
            extra={})
    return ann
def get_ann(img_path, inst_path):
    """Parse a ground-truth text file of 8-point polygons into an annotation.

    Each line is ``x1,y1,...,x4,y4,text``; lines whose text is '###' get an
    empty transcription. The 'text' class is registered in the global
    ``classes_dict`` on first use.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 255]
    if inst_path is not None:
        with open(inst_path, "r") as gt_file:
            gt_lines = gt_file.readlines()
        for raw in gt_lines:
            fields = raw.strip('\n').split(',')[:9]
            text = fields[8]
            if text == '###':
                text = ''
            fields = fields[:8]
            try:
                coords = [int(v) for v in fields]
            except ValueError:
                # First field may carry a BOM/garbage prefix — drop one char and retry.
                fields[0] = fields[0][1:]
                coords = [int(v) for v in fields]
            vertices = []
            for j in range(0, 8, 2):
                vertices.append(sly.PointLocation(coords[j + 1], coords[j]))
            polygon = sly.Polygon(exterior=vertices, interior=[])
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(
                sly.Label(polygon, classes_dict.get(class_name), None, text))
    return ann
def get_ann(img_path, inst_path, default_classes_colors, default_colors_classes):
    """Annotate an image from a color-coded instance mask, auto-naming classes.

    Unseen colors get a generated name ('objectN', N from the global
    ``count_of_colors``) and are stored in both mapping dicts; known colors
    reuse their recorded class names.
    """
    global classes_dict
    global count_of_colors
    ann = sly.Annotation.from_img_path(img_path)
    curr_color_to_class = {}
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        # Remap pure black so background pixels also become a class.
        instance_img[np.where((instance_img == [0, 0, 0]).all(axis=2))] = [1, 1, 1]
        colored_img = instance_img * 10
        instance_img = instance_img * 10
        unique_colors = np.unique(instance_img.reshape(-1, instance_img.shape[2]), axis=0)
        for color in np.array(unique_colors).tolist():
            key = tuple(color)
            if color in default_classes_colors.values():
                curr_color_to_class[key] = default_colors_classes[key]
            else:
                new_name = 'object{}'.format(count_of_colors)
                default_classes_colors[new_name] = color
                default_colors_classes[key] = new_name
                curr_color_to_class[key] = new_name
                count_of_colors += 1
    for color, class_name in curr_color_to_class.items():
        mask = np.all(colored_img == color, axis=2)
        bitmap = sly.Bitmap(data=mask)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=list(color))
            classes_dict = classes_dict.add(obj_class)  # make it for meta.json
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def get_ann(img_path, inst_path, default_classes_colors):
    """Annotate an image by extracting per-color contours from a mask image.

    For every unique color, each external contour becomes its own bitmap
    label of the class mapped in ``default_classes_colors``.

    NOTE(review): the 3-value ``cv2.findContours`` unpacking requires
    OpenCV 3.x; OpenCV 4 returns 2 values — confirm the pinned version.
    """
    global classes_dict
    instance_img = sly.image.read(inst_path)
    colored_img = instance_img
    ann = sly.Annotation.from_img_path(img_path)
    unique_colors = np.unique(instance_img.reshape(-1, instance_img.shape[2]), axis=0)
    for color in unique_colors:
        mask = np.all(colored_img == color, axis=2)
        class_name = default_classes_colors[tuple(color)]
        mask = mask.astype(np.uint8) * 128
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(mask.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [arr], (254, 254, 254))
            # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
            contour_mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=contour_mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=list(color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def get_ann(img_path, inst_path, number_class, pixel_color):
    """Annotate an image from a single-channel index mask.

    Pixel values (shifted by +1) index into ``number_class`` / ``pixel_color``;
    each external contour of a class region becomes one bitmap label.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        # Shift by one so pixel value 0 still maps to a valid class index.
        instance_img = instance_img[:, :, 0] + 1
        current_color2class = {}
        temp = np.unique(instance_img)
        for pixel in temp:
            current_color2class[pixel] = number_class[pixel]
        for pixel, class_name in current_color2class.items():
            new_color = pixel_color[pixel]
            imgray = np.where(instance_img == pixel, instance_img, 0)
            ret, thresh = cv2.threshold(imgray, 1, 255, 0)
            im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                        cv2.CHAIN_APPROX_SIMPLE)
            for i in range(len(contours)):
                arr = np.array(contours[i], dtype=int)
                mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
                cv2.fillPoly(mask_temp, [np.int32(arr)], (255, 255, 255))
                # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
                mask = mask_temp.astype(bool)
                bitmap = sly.Bitmap(data=mask)
                if not classes_dict.has_key(class_name):
                    obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                             color=new_color)
                    classes_dict = classes_dict.add(obj_class)  # make it for meta.json
                ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def get_ann(img_path, inst_path, number_class, pixel_color):
    """Annotate an image from a text-encoded index mask.

    ``inst_path`` holds one space-separated row of pixel values per line.
    Values (shifted by +2, 255 skipped) index into ``number_class`` /
    ``pixel_color``; each external contour becomes one bitmap label.
    """
    global classes_dict
    instance_img = []
    with open(inst_path) as file:
        for line in file:
            line = line.split('\n')[0]
            line = line.split(' ')
            instance_img.append(line)
    instance_img = np.array(instance_img, np.uint8)
    instance_img = instance_img + 2  # shift so raw 0 maps to a valid class key
    curr_color_to_class = {}
    temp = np.unique(instance_img)
    for pixel in temp:
        if pixel == 255:
            continue  # ignore-label
        curr_color_to_class[pixel] = number_class[pixel]
    ann = sly.Annotation.from_img_path(img_path)
    for color, class_name in curr_color_to_class.items():
        new_color = list(pixel_color[color])
        mask = np.where(instance_img == color, instance_img, 0)
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [np.int32(arr)], (254, 254, 254))
            # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
            contour_mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=contour_mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=new_color)
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def get_ann(img_path, inst_path, number_class, pixel_color):
    """Annotate an image from a MATLAB 'LabelMap' index mask.

    Each unique pixel value maps through ``number_class``/``pixel_color``
    (string keys) to a class name and color; every class region becomes one
    bitmap label. Consumed pixels are cleared so leftovers can be reported.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        mat = scipy.io.loadmat(inst_path)
        instance_img = mat['LabelMap']
        colored_img = cv2.merge((instance_img, instance_img, instance_img))
        pixel_to_class = {px: number_class[str(px)] for px in np.unique(instance_img)}
        for px, class_name in pixel_to_class.items():
            mask = np.all(colored_img == px, axis=2)  # exact match (3-channel img & rgb color)
            cls_color = pixel_color[str(px)]
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                new_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=cls_color)
                classes_dict = classes_dict.add(new_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
            colored_img[mask] = (0, 0, 0)  # clear used pixels to check missing colors below
        if np.sum(colored_img) > 0:
            sly.logger.warn('Not all objects or classes are captured from source segmentation.')
    return ann
def get_ann(img_path, inst_path, number_class, pixel_color):
    """Annotate an image from a MATLAB 'MM' index mask.

    Pixel values (shifted by +1) index into ``number_class`` /
    ``pixel_color``; each external contour of a class region becomes one
    bitmap label.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    mat = scipy.io.loadmat(inst_path)
    instance_img = mat['MM'][0][0][0]
    instance_img = instance_img.astype(np.uint8) + 1  # shift so 0 maps to a valid key
    colored_img = instance_img
    unique_pixels = np.unique(instance_img)
    for pixel in unique_pixels:
        color = pixel_color[pixel]
        class_name = number_class[pixel]
        imgray = np.where(colored_img == pixel, colored_img, 0)
        ret, thresh = cv2.threshold(imgray, 1, 255, 0)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [arr], (255, 255, 255))
            # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
            mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=list(color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def create_obj_class_collection(
        classes_mapping: Dict) -> sly.ObjClassCollection:
    """Wrap each mapping key in a Bitmap ObjClass and return the collection."""
    obj_classes = []
    for cls_name in classes_mapping.keys():
        obj_classes.append(sly.ObjClass(cls_name, sly.Bitmap))
    return sly.ObjClassCollection(obj_classes)
def _get_ann(self, img_path, segm_path, inst_path):
    """Build an annotation from segmentation (and optional instance) images.

    With an instance image, each instance color is mapped to the class of
    its representative pixel in the segmentation image; without it, the
    plain color->class mapping is used. Newly seen classes are registered
    in ``self.obj_classes``.
    """
    segmentation_img = sly.image.read(segm_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        colored_img = instance_img
        col2coord = get_col2coord(instance_img.astype(np.uint16))
        # Instance color -> class name, skipping colors with no known class.
        curr_col2cls = {}
        for col, coord in col2coord.items():
            cls_name = self.color2class_name.get(tuple(segmentation_img[coord]))
            if cls_name is not None:
                curr_col2cls[col] = cls_name
    else:
        colored_img = segmentation_img
        curr_col2cls = self.color2class_name
    ann = sly.Annotation.from_img_path(img_path)
    for color, class_name in curr_col2cls.items():
        mask = np.all(colored_img == color, axis=2)  # exact match (3-channel img & rgb color)
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
        if not self.obj_classes.has_key(class_name):
            self.obj_classes = self.obj_classes.add(obj_class)
        ann = ann.add_label(sly.Label(sly.Bitmap(data=mask), obj_class))
        colored_img[mask] = (0, 0, 0)  # clear used pixels to check missing colors below
    if np.sum(colored_img) > 0:
        sly.logger.warn('Not all objects or classes are captured from source segmentation.')
    return ann
def get_ann(img_path, inst_path, default_classes_colors):
    """Annotate an image with a 'background' bitmap plus per-contour 'skin' bitmaps.

    Background is where the grayscale mask equals 0; each external contour
    of the thresholded foreground becomes a separate 'skin' label. Class
    colors come from ``default_classes_colors``.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    instance_img = sly.image.read(inst_path)
    img_gray = cv2.cvtColor(instance_img, cv2.COLOR_BGR2GRAY)
    _, mask_foreground = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
    mask_background = (img_gray == 0)
    class_name = 'background'
    new_color = default_classes_colors[class_name]
    bitmap = sly.Bitmap(data=mask_background)
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
        classes_dict = classes_dict.add(obj_class)
    ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    im2, contours, hierarchy = cv2.findContours(mask_foreground, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    class_name = 'skin'
    new_color = default_classes_colors[class_name]
    for i in range(len(contours)):
        arr = np.array(contours[i], dtype=int)
        mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
        cv2.fillPoly(mask_temp, [np.int32(arr)], (255, 255, 255))
        mask_temp = cv2.split(mask_temp)[0]  # keep a single channel for the bitmap
        # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
        mask = mask_temp.astype(bool)
        bitmap = sly.Bitmap(data=mask)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
            classes_dict = classes_dict.add(obj_class)
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def create_meta(config, side):
    """Create a project meta with AnyGeometry classes from config[side]
    plus the labeling-workflow tags (case_id / validation / finished)."""
    obj_classes = [sly.ObjClass(name, sly.AnyGeometry) for name in config[side]]
    tag_metas = [
        sly.TagMeta("case_id", sly.TagValueType.ANY_STRING),
        sly.TagMeta("validation", sly.TagValueType.ONEOF_STRING,
                    possible_values=["accepted", "rejected"]),
        sly.TagMeta("finished", sly.TagValueType.NONE),
    ]
    meta = sly.ProjectMeta()
    meta = meta.add_obj_classes(obj_classes)
    meta = meta.add_tag_metas(tag_metas)
    return meta
def process_meta(input_meta):
    """Derive a bbox-only meta from the input meta.

    Every source class gets a Rectangle counterpart named '<name>_bbox'
    (same color); 'train'/'val' tags are added for dataset splitting.

    :return: (new ProjectMeta, mapping of source name -> bbox class name).
    """
    classes_mapping = {}
    output_meta = sly.ProjectMeta(obj_classes=[], tag_metas=input_meta.tag_metas)
    for src_class in input_meta.obj_classes:
        bbox_name = '{}_bbox'.format(src_class.name)
        classes_mapping[src_class.name] = bbox_name
        output_meta = output_meta.add_obj_class(
            sly.ObjClass(bbox_name, sly.Rectangle, color=src_class.color))
    for split_tag in ('train', 'val'):
        output_meta = output_meta.add_tag_meta(
            sly.TagMeta(split_tag, sly.TagValueType.NONE))
    return output_meta, classes_mapping
def _read_colors(self):
    """Load the class-id -> ObjClass mapping from a colors JSON file.

    Uses ``self.colors_file`` when present, otherwise falls back to the
    bundled Mapillary 'colors.json'. Also builds the instance-id mapping.
    """
    if os.path.isfile(self.colors_file):
        sly.logger.info('Will try to read segmentation colors from provided file.')
        color_info = load_json_file(self.colors_file)
    else:
        sly.logger.info('Will use default Mapillary color mapping.')
        color_info = load_json_file(
            os.path.join(os.path.dirname(__file__), 'colors.json'))
    id_to_class = {}
    for class_id, entry in enumerate(color_info['labels']):
        id_to_class[class_id] = sly.ObjClass(name=entry['readable'],
                                             geometry_type=sly.Bitmap,
                                             color=entry['color'])
    self._class_id_to_object_class = id_to_class
    sly.logger.info(
        'Found {} class(es).'.format(len(id_to_class)),
        extra={'classes': list(c.name for c in id_to_class.values())})
    self._instance_id_to_obj_class = InstanceIdToObjClass(id_to_class)
def __init__(self, class_title_to_color):
    """Build color-id lookups from a class-title -> RGB mapping and load
    the task settings and source datasets."""
    self.class_title_to_color = class_title_to_color
    # Encoded color id -> class title (inverse of the input mapping).
    self.color_id_to_class_title = {}
    for title, rgb in class_title_to_color.items():
        self.color_id_to_class_title[code_color(*rgb)] = title
    # Encoded color id -> Bitmap ObjClass carrying the original color.
    self.id_to_obj_class = {}
    for color_id, title in self.color_id_to_class_title.items():
        self.id_to_obj_class[color_id] = sly.ObjClass(
            name=title,
            geometry_type=sly.Bitmap,
            color=self.class_title_to_color[title])
    self.settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    self.src_datasets = self.read_datasets_from_path()
def _read_colors(self):
    """Load class -> RGB colors from ``self.colors_file`` (or the PascalVOC
    defaults), register all classes, and build the inverse color mapping.

    Colors-file format: one ``name R G B`` line per class, values 0-255.
    """
    if os.path.isfile(self.colors_file):
        sly.logger.info('Will try to read segmentation colors from provided file.')
        # Fix: use a context manager so the colors file handle is closed
        # (the original leaked it via open(...).readlines()).
        with open(self.colors_file, 'r') as colors_file:
            in_lines = filter(None, map(str.strip, colors_file.readlines()))
            in_splitted = (x.split() for x in in_lines)
            # Format: {name: (R, G, B)}, values [0; 255]
            self.cls2col = {x[0]: (int(x[1]), int(x[2]), int(x[3])) for x in in_splitted}
    else:
        sly.logger.info('Will use default PascalVOC color mapping.')
        self.cls2col = default_classes_colors
    obj_classes_list = [sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
                        for class_name, color in self.cls2col.items()]
    self.obj_classes = self.obj_classes.add_items(obj_classes_list)
    sly.logger.info('Determined {} class(es).'.format(len(self.cls2col)),
                    extra={'classes': list(self.cls2col.keys())})
    # Inverse lookup: RGB tuple -> class name.
    self.color2class_name = {v: k for k, v in self.cls2col.items()}
def get_ann(img_path, coords, words):
    """Annotate one image with a single 'text' polygon.

    The polygon coordinates and transcription are looked up by the image's
    basename in ``coords`` and ``words``.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 0]
    name = img_path.split('/')[-1]
    line = coords[name]
    # Coordinates are interleaved x,y pairs; PointLocation takes (row, col).
    vertices = []
    for j in range(0, 8, 2):
        vertices.append(sly.PointLocation(line[j + 1], line[j]))
    polygon = sly.Polygon(exterior=vertices, interior=[])
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
        classes_dict = classes_dict.add(obj_class)  # make it for meta.json
    ann = ann.add_label(
        sly.Label(polygon, classes_dict.get(class_name), None, words[name]))
    return ann
def upload_project_meta(api, project_id, config_yaml_info):
    """Create and upload a project meta from a YOLO-style config.

    Classes come from config ``names``/``colors`` (Rectangle geometry);
    'train' and 'val' split tags are added as well.
    """
    obj_classes = [
        sly.ObjClass(name=cls_name,
                     geometry_type=sly.Rectangle,
                     color=config_yaml_info["colors"][idx])
        for idx, cls_name in enumerate(config_yaml_info["names"])
    ]
    tag_metas = [
        sly.TagMeta(name="train", value_type=sly.TagValueType.NONE),
        sly.TagMeta(name="val", value_type=sly.TagValueType.NONE),
    ]
    project_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection(items=obj_classes),
        tag_metas=sly.TagMetaCollection(items=tag_metas))
    api.project.update_meta(project_id, project_meta.to_json())
    return project_meta
def construct_model_meta(model):
    """Build a ProjectMeta (Rectangle classes + confidence tag) from a model.

    Class names/colors are read from the (possibly DataParallel-wrapped)
    model; colors are generated when the model provides none.
    """
    wrapped = hasattr(model, 'module')
    names = model.module.names if wrapped else model.names
    if wrapped and hasattr(model.module, 'colors'):
        colors = model.module.colors
    elif hasattr(model, 'colors'):
        colors = model.colors
    else:
        # No colors on the model: generate mutually distinct ones.
        colors = []
        for _ in names:
            colors.append(sly.color.generate_rgb(exist_colors=colors))
    obj_classes = [sly.ObjClass(name, sly.Rectangle, color)
                   for name, color in zip(names, colors)]
    tag_metas = [sly.TagMeta(CONFIDENCE, sly.TagValueType.ANY_NUMBER)]
    return sly.ProjectMeta(obj_classes=sly.ObjClassCollection(obj_classes),
                           tag_metas=sly.TagMetaCollection(tag_metas))
def get_ann(img_path, inst_path):
    """Annotate an image from a PASCAL-Parts style MATLAB annotation.

    Collects object masks and their part masks from ``mat['anno']``,
    assigns each class a generated distinct color, and adds one bitmap
    label per (class, mask) pair.
    """
    global classes_dict
    default_classes_colors = {}
    colors = [(0, 0, 0)]
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        mat = scipy.io.loadmat(inst_path)
        mask = mat['anno']
        all_objects = mask[0][0][1][0]
        class_mask, unique_class_mask = {}, []
        for obj in all_objects:
            object_name, object_mask = obj[0], obj[2]
            class_mask[object_name[0]] = object_mask
            unique_class_mask.append([object_name[0], object_mask])
            if len(obj[3]) > 0:
                # Object has parts — each part gets its own class/mask entry.
                all_parts = obj[3][0]
                for part in all_parts:
                    class_mask[part[0][0]] = part[1]
                    unique_class_mask.append([part[0][0], part[1]])
        for class_name in class_mask.keys():
            if class_name not in default_classes_colors:
                new_color = generate_rgb(colors)
                colors.append(new_color)
                default_classes_colors[class_name] = new_color
        for temp in unique_class_mask:
            class_name, cl_mask = temp
            # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
            bool_mask = cl_mask.astype(bool)
            new_color = default_classes_colors[class_name]
            bitmap = sly.Bitmap(data=bool_mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=new_color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def _load_cityscapes_annotation(self, orig_img_path, orig_ann_path) -> sly.Annotation:
    """Convert a Cityscapes JSON annotation to a Supervisely annotation.

    'out of roi' objects use the first five polygon points as exterior and
    the remainder as an interior hole. Newly seen classes are registered in
    ``self.obj_classes`` with a random color.
    """
    # Fix: close the annotation file (original leaked json.load(open(...))).
    with open(orig_ann_path) as ann_file:
        json_data = json.load(ann_file)
    ann = sly.Annotation.from_img_path(orig_img_path)
    for obj in json_data['objects']:
        class_name = obj['label']
        if class_name == 'out of roi':
            polygon = obj['polygon'][:5]
            interiors = [obj['polygon'][5:]]
        else:
            polygon = obj['polygon']
            interiors = []
        interiors = [self.convert_points(interior) for interior in interiors]
        polygon = sly.Polygon(self.convert_points(polygon), interiors)
        # Fix: register the class first and reuse the stored instance, so
        # labels don't reference a duplicate ObjClass whose random color
        # differs from the one kept in the collection / meta.
        if not self.obj_classes.has_key(class_name):
            self.obj_classes = self.obj_classes.add(
                sly.ObjClass(name=class_name, geometry_type=sly.Polygon,
                             color=sly.color.random_rgb()))
        obj_class = self.obj_classes.get(class_name)
        ann = ann.add_label(sly.Label(polygon, obj_class))
    return ann
def get_ann(img_path, coords_text):
    """Annotate an image from a flat list of 9-tuples (8 coords + text).

    Each consecutive chunk of nine values yields one 'text' polygon label
    with its transcription.
    """
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 255]
    len_polygon_points = 9
    for start in range(0, len(coords_text), len_polygon_points):
        record = coords_text[start:start + len_polygon_points]
        text = record[8]
        # Interleaved x,y pairs; PointLocation takes (row, col).
        vertices = [sly.PointLocation(record[j + 1], record[j])
                    for j in range(0, 8, 2)]
        polygon = sly.Polygon(exterior=vertices, interior=[])
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
            classes_dict = classes_dict.add(obj_class)  # make it for meta.json
        ann = ann.add_label(
            sly.Label(polygon, classes_dict.get(class_name), None, text))
    return ann
def main():
    """Generate a model config from a class-names file.

    Reads non-empty class names from the input file, builds a Rectangle
    ObjClass collection and a name->index mapping, and writes both to the
    model config JSON in the output directory.
    """
    args = parse_args()
    with open(args.in_file) as names_file:
        raw_lines = names_file.readlines()
    names_list = [stripped for stripped in (raw.strip() for raw in raw_lines) if stripped]
    out_classes = sly.ObjClassCollection(items=[
        sly.ObjClass(name=name, geometry_type=sly.Rectangle)
        for name in names_list
    ])
    cls_mapping = {name: idx for idx, name in enumerate(names_list)}
    res_cfg = {
        SETTINGS: {},
        'out_classes': out_classes.to_json(),
        'class_title_to_idx': cls_mapping,
    }
    config_filename = os.path.join(args.out_dir, sly.TaskPaths.MODEL_CONFIG_NAME)
    dump_json_file(res_cfg, config_filename, indent=4)
    print('Done: {} -> {}'.format(args.in_file, config_filename))
def get_ann(img_path, inst_path, number_class, pixel_color):
    """Annotate an image from the first channel of an index-mask image.

    Zero pixels are remapped to 10 before lookup; each remaining pixel
    value maps through ``number_class`` / ``pixel_color`` to one bitmap
    label per class.
    """
    global classes_dict
    # Create the (possibly empty) annotation up front so inst_path=None is
    # handled gracefully instead of raising NameError.
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        class_mask = cv2.split(instance_img)[0]
        # Remap 0 so background also resolves to a class key.
        class_mask = np.where(class_mask != 0, class_mask, 10)
        current_color2class = {}
        unique_pixels = np.unique(class_mask)
        for pixel in unique_pixels:
            current_color2class[pixel] = number_class[pixel]
        for pixel, class_name in current_color2class.items():
            new_color = pixel_color[pixel]
            mask = np.where(class_mask == pixel, class_mask, 0)
            # Fix: np.bool was removed in NumPy 1.24 — use builtin bool.
            mask = mask.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=list(new_color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def convert_annotation(src_ann, dst_project_meta):
    """Re-type AnyGeometry labels to concrete per-geometry classes.

    Labels whose class uses AnyGeometry get a '<class>_<geometry>' class
    (created on demand and pushed to the server via the module-level
    ``api`` / ``dst_project``); other labels pass through unchanged.

    :return: (converted annotation, possibly extended destination meta).
    """
    new_labels = []
    for lbl in src_ann.labels:
        if lbl.obj_class.geometry_type != sly.AnyGeometry:
            new_labels.append(lbl)
            continue
        actual_geometry = type(lbl.geometry)
        new_class_name = "{}_{}".format(lbl.obj_class.name,
                                        actual_geometry.geometry_name())
        new_class = dst_project_meta.get_obj_class(new_class_name)
        if new_class is None:
            new_class = sly.ObjClass(name=new_class_name,
                                     geometry_type=actual_geometry,
                                     color=sly.color.random_rgb())
            dst_project_meta = dst_project_meta.add_obj_class(new_class)
            # Push immediately so the new class exists server-side.
            api.project.update_meta(dst_project.id, dst_project_meta.to_json())
        new_labels.append(lbl.clone(obj_class=new_class))
    return src_ann.clone(labels=new_labels), dst_project_meta
def generate_annotation(src_img_path, inst_path, id_to_class, class_to_color,
                        classes_collection):
    """Create an annotation from a uint16 instance image.

    Each distinct pixel value encodes one object; the class id lives in the
    high byte (``value // 256``, low byte unused). Returns the annotation
    and the (possibly extended) class collection.
    """
    ann = sly.Annotation.from_img_path(src_img_path)
    if os.path.isfile(inst_path):
        instance_img = cv2.imread(inst_path, cv2.IMREAD_UNCHANGED)  # expect uint16
        # Instance color -> class name via the high byte of the pixel value.
        color_to_class = {
            color: id_to_class[int(color // 256)]
            for color in get_color_to_coordinates(instance_img)
        }
        for color, cls_name in color_to_class.items():
            mask = (instance_img == color)  # exact match for 1d uint16
            if not classes_collection.has_key(cls_name):
                new_class = sly.ObjClass(name=cls_name,
                                         geometry_type=sly.Bitmap,
                                         color=class_to_color.get(cls_name,
                                                                  sly.color.random_rgb()))
                classes_collection = classes_collection.add(new_class)
            ann = ann.add_label(
                sly.Label(sly.Bitmap(mask), classes_collection.get(cls_name)))
            instance_img[mask] = 0  # zero consumed pixels to detect leftovers below
        if np.sum(instance_img) > 0:
            sly.logger.warn(
                'Not all objects or classes are captured from source segmentation.',
                extra={})
    return ann, classes_collection