# Common imports used by the snippets below. Names that come from the surrounding
# projects (classes_dict, hub, ag, const, catalog, get_col2coord, letterbox,
# non_max_suppression, etc.) are module-level globals and are not defined here.
import json
import logging
import os
import random
import shutil
from typing import Dict, List

import cv2
import numpy as np
import scipy.io
import skimage.color
import skimage.measure
import skimage.morphology
import torch

import supervisely_lib as sly


def get_ann(img_path, inst_path):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    instance_img = sly.image.read(inst_path)
    img_gray = cv2.cvtColor(instance_img, cv2.COLOR_BGR2GRAY)
    _, mask_foreground = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
    mask_background = (img_gray == 0)
    class_name = 'background'
    color = [1, 1, 1]
    bitmap = sly.Bitmap(data=mask_background)
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
        classes_dict = classes_dict.add(obj_class)
    ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    mask_foreground = mask_foreground.astype(bool)  # np.bool is removed in modern NumPy
    if np.any(mask_foreground):
        class_name = 'object'
        color = [255, 255, 255]
        bitmap = sly.Bitmap(data=mask_foreground)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
            classes_dict = classes_dict.add(obj_class)
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def get_masks(img):
    # `threshold`, `need_fuzzy`, `foreground` and `fuzzy` are module-level globals.
    alpha = img[:, :, 3]
    mask_fg = alpha >= threshold
    mask_fuzzy = np.logical_and(alpha > 0, alpha < 255)
    fg = sly.Label(sly.Bitmap(mask_fg), foreground)
    fzz = None
    if np.any(mask_fuzzy) and need_fuzzy:
        fzz = sly.Label(sly.Bitmap(mask_fuzzy), fuzzy)
    return fg, fzz

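# A minimal usage sketch for get_masks above; the globals it reads (threshold,
# need_fuzzy, foreground, fuzzy) are assumptions for illustration only.
threshold = 128  # hypothetical alpha cutoff for the foreground mask
need_fuzzy = True  # hypothetical flag: also emit the semi-transparent region
foreground = sly.ObjClass('foreground', sly.Bitmap, color=[0, 255, 0])
fuzzy = sly.ObjClass('fuzzy', sly.Bitmap, color=[255, 255, 0])

rgba = np.zeros((4, 4, 4), dtype=np.uint8)
rgba[1:3, 1:3, 3] = 200  # a small semi-opaque square in the alpha channel
fg_label, fuzzy_label = get_masks(rgba)
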
def test_from_supervisely():
    import supervisely_lib as sly
    data_path = "./data/test_supervisely/from_to"
    if os.path.exists(data_path):
        shutil.rmtree(data_path)
    project_name = "pixel_project"
    project_path = os.path.join(data_path, project_name)
    project = sly.Project(project_path, sly.OpenMode.CREATE)
    init_meta = project.meta
    project.meta._project_type = "images"
    project_ds = project.create_dataset(project_name)
    img = np.ones((30, 30, 3))
    project_ds.add_item_np("pixel.jpeg", img)
    item_path, item_ann_path = project_ds.get_item_paths("pixel.jpeg")
    ann = sly.Annotation.load_json_file(item_ann_path, project.meta)

    bbox_class = sly.ObjClass(name="_bbox", geometry_type=sly.Rectangle)
    meta_with_bboxes = project.meta.add_obj_classes([bbox_class])
    bbox_label = sly.Label(
        geometry=sly.Rectangle(0, 0, 10, 10),
        obj_class=meta_with_bboxes.obj_classes.get("_bbox"),
    )
    ann_with_bboxes = ann.add_labels([bbox_label])
    project_ds.set_ann("pixel.jpeg", ann_with_bboxes)
    project.set_meta(meta_with_bboxes)
    trans = hub.Dataset.from_supervisely(project)
    dataset = trans.store(os.path.join(data_path, "pixel_dataset_bbox"))
    project_back = dataset.to_supervisely(
        os.path.join(data_path, "pixel_project_bbox_back"))

    project.set_meta(init_meta)
    poly_class = sly.ObjClass(name="_poly", geometry_type=sly.Polygon)
    meta_with_poly = project.meta.add_obj_classes([poly_class])
    points = [[0, 0], [0, 10], [10, 0], [10, 10]]
    point_loc_points = [
        sly.geometry.point_location.PointLocation(*point) for point in points
    ]
    poly_label = sly.Label(
        geometry=sly.Polygon(exterior=point_loc_points, interior=[]),
        obj_class=meta_with_poly.obj_classes.get("_poly"),
    )
    ann_with_polys = ann.add_labels([poly_label])
    project_ds.set_ann("pixel.jpeg", ann_with_polys)
    project.set_meta(meta_with_poly)
    trans = hub.Dataset.from_supervisely(project)
    dataset = trans.store(os.path.join(data_path, "pixel_dataset_poly"))
    project_back = dataset.to_supervisely(
        os.path.join(data_path, "pixel_project_poly_back"))

def get_ann(img_path, inst_path, default_classes_colors):
    global classes_dict
    instance_img = sly.image.read(inst_path)
    colored_img = instance_img
    ann = sly.Annotation.from_img_path(img_path)
    unique_colors = np.unique(instance_img.reshape(-1, instance_img.shape[2]), axis=0)
    for color in unique_colors:
        mask = np.all(colored_img == color, axis=2)
        class_name = default_classes_colors[tuple(color)]
        mask = mask.astype(np.uint8) * 128
        # OpenCV >= 4 returns (contours, hierarchy)
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(mask.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [arr], (254, 254, 254))
            mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=list(color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def get_ann(img_path, inst_path, number_class, pixel_color):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        instance_img = instance_img[:, :, 0] + 1
        current_color2class = {}
        temp = np.unique(instance_img)
        for pixel in temp:
            current_color2class[pixel] = number_class[pixel]
        for pixel, class_name in current_color2class.items():
            new_color = pixel_color[pixel]
            imgray = np.where(instance_img == pixel, instance_img, 0)
            ret, thresh = cv2.threshold(imgray, 1, 255, 0)
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV >= 4
            for i in range(len(contours)):
                arr = np.array(contours[i], dtype=int)
                mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
                cv2.fillPoly(mask_temp, [np.int32(arr)], (255, 255, 255))
                mask = mask_temp.astype(bool)
                bitmap = sly.Bitmap(data=mask)
                if not classes_dict.has_key(class_name):
                    obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
                    classes_dict = classes_dict.add(obj_class)  # make it for meta.json
                ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def get_ann(img_path, inst_path, number_class, pixel_color):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    mat = scipy.io.loadmat(inst_path)
    instance_img = mat['MM'][0][0][0]
    instance_img = instance_img.astype(np.uint8) + 1
    colored_img = instance_img
    unique_pixels = np.unique(instance_img)
    for pixel in unique_pixels:
        color = pixel_color[pixel]
        class_name = number_class[pixel]
        imgray = np.where(colored_img == pixel, colored_img, 0)
        ret, thresh = cv2.threshold(imgray, 1, 255, 0)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV >= 4
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [arr], (255, 255, 255))
            mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=list(color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def get_ann(img_path, inst_path, number_class, pixel_color):
    global classes_dict
    instance_img = []
    with open(inst_path) as file:
        for line in file:
            line = line.split('\n')[0]
            line = line.split(' ')
            instance_img.append(line)
    instance_img = np.array(instance_img, np.uint8)
    instance_img = instance_img + 2
    curr_color_to_class = {}
    temp = np.unique(instance_img)
    for pixel in temp:
        if pixel == 255:
            continue
        curr_color_to_class[pixel] = number_class[pixel]
    ann = sly.Annotation.from_img_path(img_path)
    for color, class_name in curr_color_to_class.items():
        new_color = list(pixel_color[color])
        mask = np.where(instance_img == color, instance_img, 0)
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV >= 4
        for i in range(len(contours)):
            arr = np.array(contours[i], dtype=int)
            mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
            cv2.fillPoly(mask_temp, [np.int32(arr)], (254, 254, 254))
            mask = mask_temp.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def _get_ann(self, img_path, segm_path, inst_path):
    segmentation_img = sly.image.read(segm_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        colored_img = instance_img
        instance_img16 = instance_img.astype(np.uint16)
        col2coord = get_col2coord(instance_img16)
        curr_col2cls = ((col, self.color2class_name.get(tuple(segmentation_img[coord])))
                        for col, coord in col2coord.items())
        curr_col2cls = {k: v for k, v in curr_col2cls if v is not None}  # _instance_ color -> class name
    else:
        colored_img = segmentation_img
        curr_col2cls = self.color2class_name

    ann = sly.Annotation.from_img_path(img_path)
    for color, class_name in curr_col2cls.items():
        mask = np.all(colored_img == color, axis=2)  # exact match (3-channel img & rgb color)
        bitmap = sly.Bitmap(data=mask)
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=color)
        if not self.obj_classes.has_key(class_name):
            self.obj_classes = self.obj_classes.add(obj_class)
        ann = ann.add_label(sly.Label(bitmap, obj_class))
        # clear used pixels in mask to check missing colors, see below
        colored_img[mask] = (0, 0, 0)
    if np.sum(colored_img) > 0:
        sly.logger.warn('Not all objects or classes are captured from source segmentation.')
    return ann

def get_ann(img_path, inst_path, number_class, pixel_color):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        mat = scipy.io.loadmat(inst_path)
        instance_img = mat['LabelMap']
        colored_img = cv2.merge((instance_img, instance_img, instance_img))
        current_color_to_class = {}
        temp = np.unique(instance_img)
        for pixel in temp:
            current_color_to_class[pixel] = number_class[str(pixel)]
        for pixel, class_name in current_color_to_class.items():
            mask = np.all(colored_img == pixel, axis=2)  # exact match (3-channel img & rgb color)
            new_color = pixel_color[str(pixel)]
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
            # clear used pixels in mask to check missing colors, see below
            colored_img[mask] = (0, 0, 0)
        if np.sum(colored_img) > 0:
            sly.logger.warn('Not all objects or classes are captured from source segmentation.')
    return ann

def _generate_annotation(self, src_img_path, inst_path):
    ann = sly.Annotation.from_img_path(src_img_path)
    if os.path.isfile(inst_path):
        instance_img = self._read_img_unchanged(inst_path)
        col2coord = get_col2coord(instance_img)
        curr_col2cls = {
            col: self.cls_names[int(col // 256)]  # some dirty hack to determine class correctly
            for col, coord in col2coord.items()
        }
        for color, class_name in curr_col2cls.items():
            mask = instance_img == color  # exact match for 1d uint16
            bitmap = sly.Bitmap(data=mask)
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                     color=self.cls2col.get(class_name, sly.color.random_rgb()))
            if not self.obj_classes.has_key(class_name):
                self.obj_classes = self.obj_classes.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, obj_class))
            instance_img[mask] = 0  # to check missing colors, see below
        if np.sum(instance_img) > 0:
            sly.logger.warn('Not all objects or classes are captured from source segmentation.',
                            extra={})
    return ann

def get_ann(img_path, inst_path, default_classes_colors, default_colors_classes):
    global classes_dict
    global count_of_colors
    ann = sly.Annotation.from_img_path(img_path)
    curr_color_to_class = {}
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        instance_img[np.where((instance_img == [0, 0, 0]).all(axis=2))] = [1, 1, 1]
        colored_img = instance_img * 10
        instance_img = instance_img * 10
        unique_colors = np.unique(instance_img.reshape(-1, instance_img.shape[2]), axis=0)
        ann_colors = np.array(unique_colors).tolist()
        for color in ann_colors:
            if color not in default_classes_colors.values():
                default_classes_colors['object{}'.format(count_of_colors)] = color
                default_colors_classes[tuple(color)] = 'object{}'.format(count_of_colors)
                curr_color_to_class[tuple(color)] = 'object{}'.format(count_of_colors)
                count_of_colors += 1
            else:
                curr_color_to_class[tuple(color)] = default_colors_classes[tuple(color)]
    for color, class_name in curr_color_to_class.items():
        mask = np.all(colored_img == color, axis=2)
        bitmap = sly.Bitmap(data=mask)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=list(color))
            classes_dict = classes_dict.add(obj_class)  # make it for meta.json
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def get_ann(img_path, inst_path):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 255]
    if inst_path is not None:
        with open(inst_path, "r") as file:
            all_lines = file.readlines()
        for line in all_lines:
            line = line.strip('\n').split(',')[:9]
            text = line[8]
            if text == '###':
                text = ''
            line = line[:8]
            try:
                line = list(map(int, line))
            except ValueError:
                line[0] = line[0][1:]  # drop the leading non-digit (likely a BOM) and retry
                line = list(map(int, line))
            points = [sly.PointLocation(line[i + 1], line[i]) for i in range(0, 8, 2)]
            polygon = sly.Polygon(exterior=points, interior=[])
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(polygon, classes_dict.get(class_name), None, text))
    return ann

def parse_line(line, img_width, img_height, project_meta, config_yaml_info):
    line_parts = line.split()
    if len(line_parts) != 5:
        raise Exception("Invalid annotation format")
    class_id, x_center, y_center, ann_width, ann_height = line_parts
    class_name = config_yaml_info["names"][int(class_id)]
    return sly.Label(
        convert_geometry(x_center, y_center, ann_width, ann_height, img_width, img_height),
        project_meta.get_obj_class(class_name))

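# parse_line above delegates to convert_geometry, which is not shown in this
# snippet. A plausible sketch, assuming the standard YOLO convention of
# normalized (x_center, y_center, width, height) strings:
def convert_geometry(x_center, y_center, ann_width, ann_height, img_width, img_height):
    x_center, y_center = float(x_center) * img_width, float(y_center) * img_height
    ann_width, ann_height = float(ann_width) * img_width, float(ann_height) * img_height
    left = x_center - ann_width / 2
    top = y_center - ann_height / 2
    # sly.Rectangle takes (top, left, bottom, right) in pixel coordinates
    return sly.Rectangle(round(top), round(left), round(top + ann_height), round(left + ann_width))
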
def init(data, state):
    global reference_gallery
    local_path = os.path.join(ag.app.data_dir, ag.reference_path.lstrip("/"))
    ag.api.file.download(ag.team_id, ag.reference_path, local_path)
    ref_json = sly.json.load_json_file(local_path)
    for reference_key, ref_examples in ref_json["references"].items():
        if len(ref_examples) >= 2:
            cnt_grid_columns = 2
        else:
            cnt_grid_columns = 1
        review_gallery = {
            "content": {
                "projectMeta": ag.gallery_meta.to_json(),
                "annotations": {},
                "layout": [[] for i in range(cnt_grid_columns)]
            },
            "previewOptions": ag.image_preview_options,
            "options": ag.image_grid_options,
        }
        for idx, reference_info in enumerate(ref_examples):
            image_url = reference_info["image_url"]
            [top, left, bottom, right] = reference_info["bbox"]
            figure_id = reference_info["geometry"]["id"]
            label = sly.Label(sly.Rectangle(top, left, bottom, right, sly_id=figure_id),
                              ag.gallery_meta.get_obj_class("product"))
            catalog_info = catalog.index[reference_key]
            review_gallery["content"]["annotations"][figure_id] = {
                "url": image_url,
                "labelId": figure_id,  # duplicate for simplicity
                "figures": [label.to_json()],
                "zoomToFigure": {
                    "figureId": figure_id,
                    "factor": 1.2
                },
                "catalogInfo": catalog_info
            }
            review_gallery["content"]["layout"][idx % cnt_grid_columns].append(figure_id)
        reference_gallery[reference_key] = review_gallery
    sly.logger.info(f"Number of items in catalog: {len(catalog.index)}")
    sly.logger.info(f"Number of references: {len(reference_gallery)}")
    data["userRef"] = {}  # {1: "7861026000305"} #@TODO: for debug
    data["refGrid"] = reference_gallery
    state["selected"] = {}

def get_ann(img_path, inst_path, default_classes_colors):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    instance_img = sly.image.read(inst_path)
    img_gray = cv2.cvtColor(instance_img, cv2.COLOR_BGR2GRAY)
    _, mask_foreground = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
    mask_background = (img_gray == 0)
    class_name = 'background'
    new_color = default_classes_colors[class_name]
    bitmap = sly.Bitmap(data=mask_background)
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
        classes_dict = classes_dict.add(obj_class)
    ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    contours, hierarchy = cv2.findContours(mask_foreground, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV >= 4
    class_name = 'skin'
    new_color = default_classes_colors[class_name]
    for i in range(len(contours)):
        arr = np.array(contours[i], dtype=int)
        mask_temp = np.zeros(instance_img.shape, dtype=np.uint8)
        cv2.fillPoly(mask_temp, [np.int32(arr)], (255, 255, 255))
        mask_temp = cv2.split(mask_temp)[0]
        mask = mask_temp.astype(bool)
        bitmap = sly.Bitmap(data=mask)
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
            classes_dict = classes_dict.add(obj_class)
        ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def yolo_preds_to_sly_rects(detections, idx_to_class, confidence_tag_meta):
    labels = []
    for classId, confidence, box in detections:
        xmin = box[0] - box[2] / 2
        ymin = box[1] - box[3] / 2
        xmax = box[0] + box[2] / 2
        ymax = box[1] + box[3] / 2
        rect = sly.Rectangle(round(ymin), round(xmin), round(ymax), round(xmax))
        label = sly.Label(rect, idx_to_class[classId])
        confidence_tag = sly.Tag(confidence_tag_meta, value=round(float(confidence), 4))
        label = label.add_tag(confidence_tag)
        labels.append(label)
    return labels

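# A usage sketch for yolo_preds_to_sly_rects above. The detection tuples follow
# the (class_id, confidence, (cx, cy, w, h)) layout the function unpacks; the
# class mapping, tag meta, and the sample prediction are assumptions.
idx_to_class = {0: sly.ObjClass('person', sly.Rectangle)}
confidence_tag_meta = sly.TagMeta('confidence', sly.TagValueType.ANY_NUMBER)
detections = [(0, 0.87, (100.0, 120.0, 40.0, 80.0))]  # hypothetical prediction
labels = yolo_preds_to_sly_rects(detections, idx_to_class, confidence_tag_meta)
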
def generate_example(augs_settings, augs=None, preview=True, product_id=None, img=None, ann=None):
    if product_id is None or img is None or ann is None:
        product_id, img, ann = get_random_product()
    if logging.getLevelName(sly.logger.level) == 'DEBUG':
        sly.image.write(os.path.join(vis_dir, "01_img.png"), img)

    label_image, label_mask = preprocess_product(img, ann, augs_settings, is_main=True)
    if logging.getLevelName(sly.logger.level) == 'DEBUG':
        sly.image.write(os.path.join(vis_dir, "02_label_image.png"), label_image)
        sly.image.write(os.path.join(vis_dir, "03_label_mask.png"), label_mask)

    orig_h, orig_w = label_image.shape[:2]
    for crop_f, place_f, range_index in zip(crops_funcs, place_funcs, range(4)):
        if random.uniform(0, 1) <= augs_settings['noise']['corner_probability']:
            _, noise_img, noise_ann = get_random_product(ignore_id=product_id)
            noise_img, noise_ann = crop_label(noise_img, noise_ann, padding=0)
            noise_mask = draw_white_mask(noise_ann)
            if logging.getLevelName(sly.logger.level) == 'DEBUG':
                sly.image.write(os.path.join(vis_dir, "04_noise_img.png"), noise_img)
            if random.uniform(0, 1) <= augs_settings['noise']['aug_probability']:
                noise_img, noise_mask = augs.apply_to_foreground(noise_img, noise_mask)
            y_range = get_y_range(range_index, orig_h, portion=augs_settings["noise"]["max_occlusion_height"])
            x_range = get_x_range(range_index, orig_w, portion=augs_settings["noise"]["max_occlusion_width"])
            y = random.randint(int(y_range[0]), int(y_range[1]))
            x = random.randint(int(x_range[0]), int(x_range[1]))
            noise_img, noise_mask = crop_f(y, x, orig_h, orig_w, noise_img, noise_mask)
            if logging.getLevelName(sly.logger.level) == 'DEBUG':
                sly.image.write(os.path.join(vis_dir, f"04_noise_img_{range_index}.png"), noise_img)
                sly.image.write(os.path.join(vis_dir, f"05_noise_mask_{range_index}.png"), noise_mask)
            place_f(y, x, label_image, label_mask, noise_img, noise_mask)

    if logging.getLevelName(sly.logger.level) == 'DEBUG':
        sly.image.write(os.path.join(vis_dir, "06_final_mask.png"), label_mask)
    if not np.any(label_mask):  # if the mask is empty, the figure may be entirely covered by others
        return None, None, None

    label_preview = None
    if preview is True:
        label_preview = sly.Label(
            sly.Bitmap(label_mask[:, :, 0].astype(bool), origin=sly.PointLocation(0, 0)),
            RESULT_CLASS
        )
    return label_image, label_mask, label_preview

def get_ann(img_path, coords, words):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 0]
    name = img_path.split('/')[-1]
    line = coords[name]
    points = [sly.PointLocation(line[i + 1], line[i]) for i in range(0, 8, 2)]
    polygon = sly.Polygon(exterior=points, interior=[])
    if not classes_dict.has_key(class_name):
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
        classes_dict = classes_dict.add(obj_class)  # make it for meta.json
    ann = ann.add_label(sly.Label(polygon, classes_dict.get(class_name), None, words[name]))
    return ann

def get_ann(img_path, inst_path):
    global classes_dict
    default_classes_colors = {}
    colors = [(0, 0, 0)]
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        mat = scipy.io.loadmat(inst_path)
        mask = mat['anno']
        all_objects = mask[0][0][1][0]
        class_mask, unique_class_mask = {}, []
        for obj in all_objects:
            object_name, object_mask = obj[0], obj[2]
            class_mask[object_name[0]] = object_mask
            unique_class_mask.append([object_name[0], object_mask])
            if len(obj[3]) > 0:
                all_parts = obj[3][0]
                for part in all_parts:
                    class_mask[part[0][0]] = part[1]
                    unique_class_mask.append([part[0][0], part[1]])
        for class_name in class_mask.keys():
            if class_name not in default_classes_colors:
                new_color = generate_rgb(colors)
                colors.append(new_color)
                default_classes_colors[class_name] = new_color
        for temp in unique_class_mask:
            class_name, cl_mask = temp
            mask = cl_mask.astype(bool)
            new_color = default_classes_colors[class_name]
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=new_color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def inference(model, half, device, imgsz, stride, image: np.ndarray, meta: sly.ProjectMeta,
              conf_thres=0.25, iou_thres=0.45, augment=False, agnostic_nms=False,
              debug_visualization=False) -> dict:
    # NOTE: returns ann.to_json() (a dict), so the return hint is dict, not sly.Annotation.
    names = model.module.names if hasattr(model, 'module') else model.names
    img0 = image  # RGB
    # Padded resize
    img = letterbox(img0, new_shape=imgsz, stride=stride)[0]
    img = img.transpose(2, 0, 1)  # HWC to CHW, e.g. 3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    inf_out = model(img, augment=augment)[0]
    # Apply NMS
    labels = []
    output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, agnostic=agnostic_nms)
    for i, det in enumerate(output):
        if det is not None and len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
            for *xyxy, conf, cls in reversed(det):
                top, left, bottom, right = int(xyxy[1]), int(xyxy[0]), int(xyxy[3]), int(xyxy[2])
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta.get_obj_class(names[int(cls)])
                tag = sly.Tag(meta.get_tag_meta(CONFIDENCE), round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)

    height, width = img0.shape[:2]
    ann = sly.Annotation(img_size=(height, width), labels=labels)
    if debug_visualization is True:
        # visualize for debug purposes
        vis = np.copy(img0)
        ann.draw_contour(vis, thickness=2)
        sly.image.write("vis.jpg", vis)
    return ann.to_json()

def read_mask_labels(mask_path: str, classes_mapping: Dict,
                     obj_classes: sly.ObjClassCollection) -> List[sly.Label]:
    mask = cv2.imread(mask_path)[:, :, 0]
    labels_list = []
    for cls_name, color in classes_mapping.items():
        if color == MATCH_ALL:
            bool_mask = mask > 0
        elif isinstance(color, int):
            bool_mask = mask == color
        elif isinstance(color, list):
            bool_mask = np.isin(mask, color)
        else:
            raise ValueError('Wrong color format. It must be an integer, a list of integers, '
                             'or the special key string "__all__".')
        if bool_mask.sum() == 0:
            continue
        bitmap = sly.Bitmap(data=bool_mask)
        obj_class = obj_classes.get(cls_name)
        labels_list.append(sly.Label(geometry=bitmap, obj_class=obj_class))
    return labels_list

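# A usage sketch for read_mask_labels above. MATCH_ALL is the module's special
# "__all__" sentinel; the class names, gray values, and mask path are assumptions.
obj_classes = sly.ObjClassCollection([
    sly.ObjClass('road', sly.Bitmap),
    sly.ObjClass('car', sly.Bitmap),
])
classes_mapping = {'road': 7, 'car': [26, 27, 28]}  # gray values in the mask
labels = read_mask_labels('/path/to/mask.png', classes_mapping, obj_classes)
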
def _load_cityscapes_annotation(self, orig_img_path, orig_ann_path) -> sly.Annotation:
    with open(orig_ann_path) as f:
        json_data = json.load(f)
    ann = sly.Annotation.from_img_path(orig_img_path)
    for obj in json_data['objects']:
        class_name = obj['label']
        if class_name == 'out of roi':
            polygon = obj['polygon'][:5]
            interiors = [obj['polygon'][5:]]
        else:
            polygon = obj['polygon']
            interiors = []
        interiors = [self.convert_points(interior) for interior in interiors]
        polygon = sly.Polygon(self.convert_points(polygon), interiors)
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon,
                                 color=sly.color.random_rgb())
        ann = ann.add_label(sly.Label(polygon, obj_class))
        if not self.obj_classes.has_key(class_name):
            self.obj_classes = self.obj_classes.add(obj_class)
    return ann

def visualize_dets(img_, output_, save_path_, names_, meta_):
    labels = []
    for i, det in enumerate(output_):
        if det is not None and len(det):
            for *xyxy, conf, cls in reversed(det):
                left, top, right, bottom = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
                rect = sly.Rectangle(top, left, bottom, right)
                obj_class = meta_.get_obj_class(names_[int(cls)])
                tag = sly.Tag(meta_.get_tag_meta("confidence"), round(float(conf), 4))
                label = sly.Label(rect, obj_class, sly.TagCollection([tag]))
                labels.append(label)
    width, height = img_.size  # PIL image: size is (width, height)
    ann = sly.Annotation(img_size=(height, width), labels=labels)
    vis = np.copy(img_)
    ann.draw_contour(vis, thickness=2)
    sly.image.write(save_path_, vis)
    return vis

def process(img, ann):
    original = (img, ann)
    flipped = sly.aug.fliplr(*original)
    crops = []
    for cur_img, cur_ann in [original, flipped]:
        for i in range(image_multiplier):
            res_img, res_ann = sly.aug.random_crop_fraction(cur_img, cur_ann, (70, 90), (70, 90))
            crops.append((res_img, res_ann))
    results = []
    for cur_img, cur_ann in [original, flipped, *crops]:
        bg_label = sly.Label(sly.Rectangle.from_array(cur_img), class_bg)
        cur_ann = cur_ann.add_label(bg_label)
        tag = sly.Tag(tag_meta_train if random.random() <= validation_portion else tag_meta_val)
        cur_ann = cur_ann.add_tag(tag)
        results.append((cur_img, cur_ann))
    return results

def get_ann(img_path, coords_text):
    global classes_dict
    ann = sly.Annotation.from_img_path(img_path)
    class_name = 'text'
    color = [255, 0, 255]
    len_polygon_points = 9
    for i in range(0, len(coords_text), len_polygon_points):
        line = coords_text[i:i + len_polygon_points]
        text = line[8]
        points = [sly.PointLocation(line[j + 1], line[j]) for j in range(0, 8, 2)]
        polygon = sly.Polygon(exterior=points, interior=[])
        if not classes_dict.has_key(class_name):
            obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)
            classes_dict = classes_dict.add(obj_class)  # make it for meta.json
        ann = ann.add_label(sly.Label(polygon, classes_dict.get(class_name), None, text))
    return ann

def get_ann(img_path, inst_path, number_class, pixel_color):
    global classes_dict
    # Define ann up front so the function also returns a valid (empty) annotation
    # when inst_path is None, matching the sibling converters above.
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        instance_img = sly.image.read(inst_path)
        class_mask = cv2.split(instance_img)[0]
        class_mask = np.where(class_mask != 0, class_mask, 10)
        current_color2class = {}
        unique_pixels = np.unique(class_mask)
        for pixel in unique_pixels:
            current_color2class[pixel] = number_class[pixel]
        for pixel, class_name in current_color2class.items():
            new_color = pixel_color[pixel]
            mask = np.where(class_mask == pixel, class_mask, 0)
            mask = mask.astype(bool)
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap, color=list(new_color))
                classes_dict = classes_dict.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann

def transform_for_segmentation(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    new_classes = {}
    class_masks = {}
    for obj_class in meta.obj_classes:
        obj_class: sly.ObjClass
        new_class = obj_class.clone(name=obj_class.name + "-mask")
        new_classes[obj_class.name] = new_class
        class_masks[obj_class.name] = np.zeros(ann.img_size, np.uint8)
    new_class_collection = sly.ObjClassCollection(list(new_classes.values()))
    for label in ann.labels:
        label.draw(class_masks[label.obj_class.name], color=255)
    new_labels = []
    for class_name, white_mask in class_masks.items():
        mask = white_mask == 255
        obj_class = new_classes[class_name]
        bitmap = sly.Bitmap(data=mask)
        new_labels.append(sly.Label(geometry=bitmap, obj_class=obj_class))
    res_meta = meta.clone(obj_classes=new_class_collection)
    res_ann = ann.clone(labels=new_labels)
    return (res_meta, res_ann)

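# A usage sketch for transform_for_segmentation above: every label of each class
# is rasterized into a single "<name>-mask" bitmap. The file paths are assumptions.
meta = sly.ProjectMeta.from_json(sly.json.load_json_file('meta.json'))
ann = sly.Annotation.load_json_file('ann.json', meta)
seg_meta, seg_ann = transform_for_segmentation(meta, ann)
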
def generate_annotation(src_img_path, inst_path, id_to_class, class_to_color, classes_collection):
    ann = sly.Annotation.from_img_path(src_img_path)
    if os.path.isfile(inst_path):
        instance_img = cv2.imread(inst_path, cv2.IMREAD_UNCHANGED)  # expect uint16
        col2coord = get_color_to_coordinates(instance_img)
        # Some dirty hack to determine the class correctly: the class id lives in
        # the high byte, so the (unused) low byte is dropped.
        current_color_to_class = {
            color: id_to_class[int(color // 256)]
            for color in col2coord.keys()
        }
        for color, class_name in current_color_to_class.items():
            mask = (instance_img == color)  # exact match for 1d uint16
            bitmap = sly.Bitmap(mask)
            if not classes_collection.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap,
                                         color=class_to_color.get(class_name, sly.color.random_rgb()))
                classes_collection = classes_collection.add(obj_class)
            ann = ann.add_label(sly.Label(bitmap, classes_collection.get(class_name)))
            instance_img[mask] = 0  # to check missing colors, see below
        if np.sum(instance_img) > 0:
            sly.logger.warn('Not all objects or classes are captured from source segmentation.',
                            extra={})
    return ann, classes_collection

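# generate_annotation above calls get_color_to_coordinates, which is not shown in
# this snippet. A minimal sketch consistent with its usage (one representative
# coordinate per nonzero uint16 value); this is an assumption, not the original:
def get_color_to_coordinates(instance_img):
    color_to_coordinates = {}
    rows, cols = np.nonzero(instance_img)
    for row, col in zip(rows, cols):
        # keep the first coordinate seen for each color
        color_to_coordinates.setdefault(int(instance_img[row, col]), (row, col))
    return color_to_coordinates
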
image_idx = 0
for src_dataset in src_project:
    ds_progress = sly.Progress(
        'Processing dataset: {!r}/{!r}'.format(src_project.name, src_dataset.name),
        total_cnt=len(src_dataset))
    dst_dataset = dst_project.create_dataset(src_dataset.name)
    for item_name in src_dataset:
        item_paths = src_dataset.get_item_paths(item_name)
        img = sly.image.read(item_paths.img_path)
        ann = sly.Annotation.load_json_file(item_paths.ann_path, src_project.meta)

        # Add background.
        if class_bg is not None:
            bg_label = sly.Label(sly.Rectangle.from_array(img), class_bg)
            ann = ann.clone(labels=[bg_label] + ann.labels)

        # Decide whether this image and its crops should go to a train or validation fold.
        tag = sly.Tag(tag_meta_train) if is_train_image[image_idx] else sly.Tag(tag_meta_val)
        ann = ann.add_tag(tag)

        augmented_items = sly.aug.flip_add_random_crops(img, ann, crops_per_image,
                                                        crop_side_fraction, crop_side_fraction)
        aug_imgs, aug_anns = zip(*augmented_items)
        names = sly.generate_names(item_name, len(augmented_items))
        for aug_name, aug_img, aug_ann in zip(names, aug_imgs, aug_anns):
            dst_dataset.add_item_np(item_name=aug_name, img=aug_img, ann=aug_ann)
        image_idx += 1

def extract_foreground():
    api = sly.Api.from_env()
    task_id = int(os.getenv("TASK_ID"))
    try:
        if DEBUG:
            sly.fs.mkdir(const.DEBUG_VIS_DIR)
            sly.fs.clean_dir(const.DEBUG_VIS_DIR)
        state = api.task.get_data(task_id, field=const.STATE)
        project = api.task.get_data(task_id,
                                    field="{}.projects[{}]".format(const.DATA, state[const.PROJECT_INDEX]))
        project_id = project["id"]
        workspace_id = api.project.get_info_by_id(project_id).workspace_id
        team_id = api.workspace.get_info_by_id(workspace_id).team_id
        processed_items = []
        api.task.set_data(task_id, payload=processed_items,
                          field="{}.{}".format(const.DATA, const.TABLE))

        # sample images
        all_images = []
        for dataset in api.dataset.get_list(project_id):
            images = api.image.get_list(dataset.id)
            image_dataset = [dataset] * len(images)
            all_images.extend(zip(images, image_dataset))

        # read sample count
        if state[const.SAMPLE_FLAG]:
            cnt_images = state[const.SAMPLE_COUNT]
            assert cnt_images <= len(all_images)
            random.shuffle(all_images)
            all_images = all_images[:cnt_images]

        fg_class, st_class = set_project_meta(api, project_id, state)
        sly.fs.mkdir(const.CACHE_DIR)
        for idx, (image_info, dataset_info) in enumerate(all_images):
            table_row = {}
            image_id = image_info.id
            table_row['id'] = image_id
            sly.logger.debug("---> image_id = {}".format(image_id))
            if DEBUG:
                debug_img_dir = os.path.join(const.DEBUG_VIS_DIR, str(image_id))
                sly.fs.mkdir(debug_img_dir)
            image_url = api.image.url(team_id, workspace_id, project_id, dataset_info.id, image_id)
            table_row['name'] = '<a href="{0}" rel="noopener noreferrer" target="_blank">{1}</a>' \
                .format(image_url, image_info.name)
            image_path = os.path.join(const.CACHE_DIR, "{}.png".format(image_id))
            if not sly.fs.file_exists(image_path):
                api.image.download_path(image_id, image_path)
            image = sly.image.read(image_path, remove_alpha_channel=False)
            if image.shape[2] != 4:
                sly.logger.warning(
                    "Image (id = {}) is skipped: it does not have alpha channel".format(image_id))
                continue
            if DEBUG:
                sly.image.write(os.path.join(debug_img_dir, '000_image.png'), image,
                                remove_alpha_channel=False)
            alpha = image[:, :, 3]
            if DEBUG:
                sly.image.write(os.path.join(debug_img_dir, '001_alpha.png'), alpha)

            # extract foreground pixels
            mask = (alpha >= state[const.ALPHA_THRESHOLD]).astype(np.uint8) * 255
            if DEBUG:
                sly.image.write(os.path.join(debug_img_dir, '002_mask.png'), mask)

            # create mask for all connected components
            mask_instances, num_cc = skimage.measure.label(mask, background=0, return_num=True)
            if DEBUG:
                vis = skimage.color.label2rgb(mask_instances, bg_label=0)
                sly.image.write(os.path.join(debug_img_dir, '003_mask_cc.png'), vis * 255)
            table_row['total objects'] = num_cc

            # remove small objects
            area_pixels = int(mask_instances.shape[0] * mask_instances.shape[1]
                              / 100 * state[const.AREA_THRESHOLD])
            mask_cleaned = skimage.morphology.remove_small_objects(mask_instances, area_pixels)
            mask_cleaned, num_cc = skimage.measure.label(mask_cleaned, background=0, return_num=True)
            if DEBUG:
                vis = skimage.color.label2rgb(mask_cleaned, bg_label=0)
                sly.image.write(os.path.join(debug_img_dir, '004_mask_cleaned.png'), vis * 255)

            cc_area = count_instances_area(mask_cleaned, num_cc)
            cc_area = cc_area[:state[const.MAX_NUMBER_OF_OBJECTS]]
            table_row['final objects'] = len(cc_area)

            labels = []
            sly.logger.debug("image_id = {}, number of extracted objects: {}".format(image_id, len(cc_area)))
            for cc_color, area in cc_area:
                object_mask = (mask_cleaned == cc_color)
                geometry = sly.Bitmap(data=object_mask)
                label = sly.Label(geometry, fg_class)
                labels.append(label)

            # find gray zone
            gray_zone = np.logical_and(alpha != 0, alpha != 255)
            if np.sum(gray_zone) != 0:  # gray zone is not empty
                gray_geometry = sly.Bitmap(data=gray_zone)
                gray_label = sly.Label(gray_geometry, st_class)
                labels.append(gray_label)
                table_row['gray area (%)'] = round(
                    np.sum(gray_zone) * 100 / (gray_zone.shape[0] * gray_zone.shape[1]), 2)

            ann = sly.Annotation(mask.shape[:2], labels=labels)
            if DEBUG:
                render = np.zeros(ann.img_size + (3,), dtype=np.uint8)
                ann.draw(render)
                sly.image.write(os.path.join(debug_img_dir, '005_ann.png'), render)
            api.annotation.upload_ann(image_id, ann)
            processed_items.append(table_row)
            # time.sleep(2)

            if (idx % const.NOTIFY_EVERY == 0 and idx != 0) or idx == len(all_images) - 1:
                api.task.set_data(task_id, payload=processed_items,
                                  field="{}.{}".format(const.DATA, const.TABLE), append=True)
                processed_items = []
                update_progress(api, task_id, (idx + 1) * 100 / len(all_images))

            need_stop = api.task.get_data(task_id,
                                          field="{}.{}".format(const.STATE, const.STOP_CLICKED))
            if need_stop is True:
                reset_buttons_and_progress(api, task_id)
                sly.logger.info("SCRIPT IS STOPPED")
                exit(0)
    except Exception as e:
        sly.logger.critical('Unexpected exception', exc_info=True, extra={
            'event_type': sly.EventType.TASK_CRASHED,
            'exc_str': str(e),
        })
        sly.logger.debug('Script finished: ERROR')
    else:
        sly.logger.debug('Script finished: OK')
    finally:
        reset_buttons_and_progress(api, task_id)