def pologons_to_mask(polygons, size):
    """Rasterize COCO-style polygons into one binary mask.

    NOTE(review): the (misspelled) function name is kept as-is so existing
    callers keep working.

    :param polygons: list of polygons in COCO format (flat [x0, y0, x1, y1, ...])
    :param size: (height, width) pair of the target mask
    :return: decoded binary mask from the merged RLE
    """
    height, width = size
    # COCO PythonAPI: encode every polygon as RLE, merge them into a single
    # RLE, then decode that back into a binary mask.
    encoded = mask_utils.frPyObjects(polygons, height, width)
    merged = mask_utils.merge(encoded)
    return mask_utils.decode(merged)
def annToRLE(self, ann):
    """
    Convert an annotation's segmentation (polygons, uncompressed RLE, or
    compressed RLE) to compressed RLE.

    Fixes vs. original: the docstring wrongly claimed a binary numpy mask is
    returned (the function returns an RLE dict); `type(x) ==` checks replaced
    with `isinstance`; redundant dict re-lookup removed.

    :param ann: annotation dict with 'image_id' and 'segmentation' keys
    :return: RLE dict (COCO compressed RLE format)
    """
    t = self.imgs[ann['image_id']]
    h, w = t['height'], t['width']
    segm = ann['segmentation']
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts;
        # merge all per-part RLEs into one RLE
        rles = maskUtils.frPyObjects(segm, h, w)
        rle = maskUtils.merge(rles)
    elif isinstance(segm['counts'], list):
        # uncompressed RLE -> compress it
        rle = maskUtils.frPyObjects(segm, h, w)
    else:
        # already compressed RLE; return as-is
        rle = segm
    return rle
def convert_coco_poly_to_mask(segmentations, height, width):
    """Turn per-instance COCO polygon lists into a stacked mask tensor.

    :param segmentations: iterable of per-instance polygon lists (COCO format)
    :param height: image height in pixels
    :param width: image width in pixels
    :return: tensor of shape (num_instances, height, width); an all-zero
             uint8 tensor with a leading 0 dim when there are no instances
    """
    per_instance = []
    for polys in segmentations:
        rles = coco_mask.frPyObjects(polys, height, width)
        decoded = coco_mask.decode(rles)
        # decode() can drop the trailing per-polygon axis; restore it
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        tensor = torch.as_tensor(decoded, dtype=torch.uint8)
        # collapse the per-polygon axis: a pixel is on if any part covers it
        per_instance.append(tensor.any(dim=2))
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
def __init__(self, id, category, category_id, bbox, area, segment, crop_builder, image, which_set):
    """Hold one annotated object's metadata, RLE mask, and crop loader.

    Fixes vs. original: the large block of commented-out debug/plotting code
    was removed, and `self.crop_loader` is now always initialized (it was
    left undefined when `crop_builder` is None, unlike the sibling
    constructor that sets it to None).

    :param id: object identifier (name kept for interface compatibility)
    :param bbox: bounding box passed through to the crop builder
    :param segment: COCO segmentation for this object
    :param crop_builder: optional factory for a crop loader; may be None
    :param image: source image object (provides .height, .width, .id)
    :param which_set: dataset split name forwarded to the crop builder
    """
    self.id = id
    self.category = category
    self.category_id = category_id
    self.bbox = bbox
    self.area = area
    self.segment = segment
    # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
    self.rle_mask = None
    if use_coco:
        self.rle_mask = cocoapi.frPyObjects(self.segment,
                                            h=image.height,
                                            w=image.width)
    # Always defined, so attribute access is safe when crop_builder is None.
    self.crop_loader = None
    if crop_builder is not None:
        filename = "{}.jpg".format(image.id)
        self.crop_loader = crop_builder.build(id,
                                              filename=filename,
                                              which_set=which_set,
                                              bbox=bbox)
def __init__(self, crop_id, category, category_id, bbox, area, segment, crop_builder, image):
    """Hold one annotated crop's metadata, RLE mask, and crop loader.

    Fix vs. original: `type(x) == dict` / `type(x[0]) == list` comparisons
    replaced with the idiomatic `isinstance` checks (same accepted values).

    :param crop_id: identifier stored as self.id
    :param segment: COCO segmentation -- polygon list-of-lists, uncompressed
        RLE dict, or already-encoded RLE
    :param crop_builder: optional factory for a crop loader; may be None
    :param image: source image object (provides .height, .width, .filename)
    """
    self.id = crop_id
    self.category = category
    self.category_id = category_id
    self.bbox = bbox
    self.area = area
    self.segment = segment
    # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
    if isinstance(segment, dict) or isinstance(segment[0], list):
        # polygon list or (uncompressed) RLE dict -> encode to RLE
        self.rle_mask = cocoapi.frPyObjects(segment,
                                            h=image.height,
                                            w=image.width)
    else:
        # already RLE-encoded; keep as-is
        self.rle_mask = segment
    if crop_builder is not None:
        self.crop_loader = crop_builder.build(crop_id,
                                              filename=image.filename,
                                              bbox=bbox)
        self.crop_scale = crop_builder.scale
def __init__(self, id, category, category_id, bbox, area, segment, crop_builder, image, which_set):
    """Hold one annotated object's metadata, RLE mask, and crop loader.

    :param id: object identifier (name kept for interface compatibility)
    :param segment: COCO segmentation for this object
    :param crop_builder: optional factory for a crop loader; may be None
    :param image: source image object (provides .height, .width, .id)
    :param which_set: dataset split name forwarded to the crop builder
    """
    self.id = id
    self.category = category
    self.category_id = category_id
    self.bbox = bbox
    self.area = area
    self.segment = segment
    # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
    if use_coco:
        self.rle_mask = cocoapi.frPyObjects(self.segment,
                                            h=image.height,
                                            w=image.width)
    else:
        self.rle_mask = None
    self.crop_loader = None
    if crop_builder is not None:
        self.crop_loader = crop_builder.build(id,
                                              filename="{}.jpg".format(image.id),
                                              which_set=which_set,
                                              bbox=bbox)
        self.crop_scale = crop_builder.scale
def showAnns(self, anns):
    """
    Display the specified annotations on the current matplotlib axes.

    Instance annotations are drawn as polygons/masks (plus keypoint
    skeletons when present); caption annotations are printed to stdout.

    Fix vs. original: crowd handling used `ann['iscrowd']`, which raised
    KeyError when the key was absent and left `color_mask` unbound for
    values other than 0/1 -- it now defaults missing 'iscrowd' to 0.

    :param anns (array of object): annotations to display
    :return: 0 when *anns* is empty (kept for backward compatibility),
             otherwise None
    """
    if len(anns) == 0:
        return 0
    # Decide which kind of annotations we were given.
    if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    else:
        raise Exception('datasetType not supported')
    if datasetType == 'instances':
        ax = plt.gca()
        ax.set_autoscale_on(False)
        polygons = []
        color = []
        for ann in anns:
            # one random (brightened) color per annotation
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            if 'segmentation' in ann:
                if type(ann['segmentation']) == list:
                    # polygon: flat [x0, y0, x1, y1, ...] -> (N, 2) vertices
                    for seg in ann['segmentation']:
                        poly = np.array(seg).reshape(
                            (int(len(seg) / 2), 2))
                        polygons.append(Polygon(poly))
                        color.append(c)
                else:
                    # mask given as RLE
                    t = self.imgs[ann['image_id']]
                    if type(ann['segmentation']['counts']) == list:
                        # uncompressed RLE -> compress first
                        rle = maskUtils.frPyObjects([ann['segmentation']],
                                                    t['height'], t['width'])
                    else:
                        rle = [ann['segmentation']]
                    m = maskUtils.decode(rle)
                    img = np.ones((m.shape[0], m.shape[1], 3))
                    # Missing 'iscrowd' is treated as a normal (non-crowd)
                    # object; original code indexed the key directly.
                    if ann.get('iscrowd', 0) == 1:
                        # fixed green-ish color for crowd regions
                        color_mask = np.array([2.0, 166.0, 101.0]) / 255
                    else:
                        color_mask = np.random.random((1, 3)).tolist()[0]
                    for i in range(3):
                        img[:, :, i] = color_mask[i]
                    # alpha 0.5 so the underlying image stays visible
                    ax.imshow(np.dstack((img, m * 0.5)))
            if 'keypoints' in ann and type(ann['keypoints']) == list:
                # turn skeleton into zero-based index
                sks = np.array(
                    self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                kp = np.array(ann['keypoints'])
                x = kp[0::3]
                y = kp[1::3]
                v = kp[2::3]
                for sk in sks:
                    # draw a limb only when both endpoints are labeled
                    if np.all(v[sk] > 0):
                        plt.plot(x[sk], y[sk], linewidth=3, color=c)
                # labeled (v>0) and visible (v>1) keypoints, distinct edge colors
                plt.plot(x[v > 0], y[v > 0], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor='k',
                         markeredgewidth=2)
                plt.plot(x[v > 1], y[v > 1], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor=c,
                         markeredgewidth=2)
        # filled polygons plus a solid outline pass
        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
        ax.add_collection(p)
        p = PatchCollection(polygons, facecolor='none', edgecolors=color,
                            linewidths=2)
        ax.add_collection(p)
    elif datasetType == 'captions':
        for ann in anns:
            print(ann['caption'])