def get_instance_segmentation(self, img, lower_thres: tuple = (0, 0, 0), upper_thresh: tuple = (255, 255, 255)):
    """Extract an instance segmentation from ``img`` by color thresholding.

    Pixels whose channel values fall within ``[lower_thres, upper_thresh]``
    (inclusive, per channel) are masked, the external contours of that mask
    are traced, and a Segmentation is built from those contours.

    Args:
        img: Image array in the same channel order as the thresholds
            (presumably BGR, as produced by OpenCV — TODO confirm).
        lower_thres: Per-channel inclusive lower bound. Default (0, 0, 0).
        upper_thresh: Per-channel inclusive upper bound. Default (255, 255, 255).

    Returns:
        Segmentation built from the external contours of the color mask.
    """
    # Normalize to tuples so list inputs are also accepted by cv2.inRange.
    color_mask = cv2.inRange(src=img, lowerb=tuple(lower_thres), upperb=tuple(upper_thresh))
    # RETR_EXTERNAL: only outermost contours; CHAIN_APPROX_SIMPLE: compress
    # straight segments to their endpoints.
    color_contours, _ = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    seg = Segmentation.from_contour(contour_list=color_contours)
    return seg
def get_instance_segmentation(self, img: np.ndarray, target_bgr: List[int] = None, interval: int = 1, exclude_invalid_polygons: bool = True):
    """Extract the instance segmentation of a single target color from ``img``.

    A color window of ``target_bgr ± interval`` (clamped to [0, 255] per
    channel) is thresholded, and a Segmentation is built from the external
    contours of the resulting mask.

    Args:
        img: BGR image array.
        target_bgr: Target color as a 3-element BGR list. When None, the
            color is taken from ``self.get_color_from_id()``.
        interval: Per-channel tolerance around ``target_bgr``. Default 1.
        exclude_invalid_polygons: Forwarded to ``Segmentation.from_contour``;
            drops contours that do not form valid polygons.

    Returns:
        Segmentation built from the external contours of the color mask.
    """
    target_bgr = target_bgr if target_bgr is not None else self.get_color_from_id()
    check_list_length(target_bgr, correct_length=3)
    # Clamp the window to the valid 8-bit channel range.
    lower_bgr = tuple(max(0, val - interval) for val in target_bgr)
    upper_bgr = tuple(min(255, val + interval) for val in target_bgr)
    color_mask = cv2.inRange(src=img, lowerb=lower_bgr, upperb=upper_bgr)
    # RETR_EXTERNAL: only outermost contours; CHAIN_APPROX_SIMPLE: compress
    # straight segments to their endpoints.
    color_contours, _ = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    seg = Segmentation.from_contour(
        contour_list=color_contours,
        exclude_invalid_polygons=exclude_invalid_polygons)
    return seg
from logger import logger
from common_utils.common_types.segmentation import Segmentation, Polygon
import numpy as np

# Hand-built contours in OpenCV layout (N points, each wrapped as [[x, y]]).
# The last entry has only two points and therefore cannot form a valid
# polygon; with exclude_invalid_polygons=True it should be dropped.
demo_contours = [
    np.array([[[0, 0]], [[1, 0]], [[1, 2]], [[0, 1]]]),
    np.array([[[0, 0]], [[1, 0]], [[1, 2]], [[0, 1]]]),
    np.array([[[1, 0]], [[2, 0]], [[2, 2]], [[1, 1]]]),
    np.array([[[1, 1]], [[2, 1]], [[2, 3]], [[2, 10]], [[1, 2]]]),
    np.array([[[50, 100]], [[100, 200]]]),
]

result = Segmentation.from_contour(
    contour_list=demo_contours, exclude_invalid_polygons=True)
logger.purple(result)
def from_dict(cls, ann_dict: dict, strict: bool = True) -> COCO_Annotation:
    """Build a COCO_Annotation from a COCO-format annotation dict.

    Args:
        ann_dict: Raw annotation dict as found in a COCO JSON file.
        strict: When True, all standard COCO annotation keys must be
            present. When False, only 'id', 'category_id' and 'image_id'
            are required; every other field falls back to None.

    Returns:
        A COCO_Annotation populated from ``ann_dict``.

    Raises:
        Whatever ``check_required_keys`` raises when a required key is
        missing (presumably a KeyError-like error — TODO confirm).
    """
    if strict:
        check_required_keys(ann_dict, required_keys=[
            'segmentation', 'num_keypoints', 'area', 'iscrowd',
            'keypoints', 'image_id', 'bbox', 'category_id', 'id'
        ])
        return COCO_Annotation(
            segmentation=Segmentation.from_list(ann_dict['segmentation'], demarcation=False),
            num_keypoints=ann_dict['num_keypoints'],
            area=ann_dict['area'],
            iscrowd=ann_dict['iscrowd'],
            keypoints=Keypoint2D_List.from_list(ann_dict['keypoints'], demarcation=False),
            image_id=ann_dict['image_id'],
            bbox=BBox.from_list(ann_dict['bbox'], input_format='pminsize'),
            category_id=ann_dict['category_id'],
            id=ann_dict['id'],
            keypoints_3d=Keypoint3D_List.from_list(
                ann_dict['keypoints_3d'], demarcation=False) if 'keypoints_3d' in ann_dict else None,
            camera=Camera.from_dict(ann_dict['camera_params']) if 'camera_params' in ann_dict else None)
    else:
        check_required_keys(
            ann_dict, required_keys=['id', 'category_id', 'image_id'])
        if 'segmentation' not in ann_dict:
            seg = None
        # Use .get() so a dict with 'segmentation' but no 'iscrowd' key does
        # not raise KeyError (the non-strict branch treats 'iscrowd' as
        # optional everywhere else in this constructor).
        elif ann_dict.get('iscrowd') == 1 and isinstance(ann_dict['segmentation'], dict):
            # Crowd annotation stored as RLE: decode to a binary mask, then
            # trace its external contours into polygons.
            compressed_rle = mask.frPyObjects(
                ann_dict['segmentation'],
                ann_dict['segmentation']['size'][0],
                ann_dict['segmentation']['size'][1])
            seg_mask = mask.decode(compressed_rle)
            contours, _ = cv2.findContours(seg_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
            seg = Segmentation.from_contour(contour_list=contours)
        else:
            seg = Segmentation.from_list(ann_dict['segmentation'], demarcation=False)
        return COCO_Annotation(
            segmentation=seg,
            num_keypoints=ann_dict['num_keypoints'] if 'num_keypoints' in ann_dict else None,
            area=ann_dict['area'] if 'area' in ann_dict else None,
            iscrowd=ann_dict['iscrowd'] if 'iscrowd' in ann_dict else None,
            keypoints=Keypoint2D_List.from_list(ann_dict['keypoints'], demarcation=False)
            if 'keypoints' in ann_dict else None,
            image_id=ann_dict['image_id'],
            bbox=BBox.from_list(ann_dict['bbox'], input_format='pminsize')
            if 'bbox' in ann_dict else None,
            category_id=ann_dict['category_id'],
            id=ann_dict['id'],
            keypoints_3d=Keypoint3D_List.from_list(
                ann_dict['keypoints_3d'], demarcation=False) if 'keypoints_3d' in ann_dict else None,
            camera=Camera.from_dict(ann_dict['camera_params']) if 'camera_params' in ann_dict else None)