def segm_to_rle(segm, w, h):
    """Convert a segmentation (polygons or uncompressed RLE) to compressed RLE.

    Reference:
    https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py

    Args:
        segm (list[list[float]] or dict): Polygon list(s), or an RLE dict
            (uncompressed when ``segm['counts']`` is a list, otherwise
            already compressed).
        w (int): Image width.
        h (int): Image height.

    Returns:
        dict: Compressed RLE.
    """
    if isinstance(segm, list):
        # Polygon -- a single object may consist of multiple parts;
        # merge all parts into one RLE mask.
        rles = maskUtils.frPyObjects(segm, h, w)
        rle = maskUtils.merge(rles)
    elif isinstance(segm['counts'], list):
        # Uncompressed RLE -> compress it.
        rle = maskUtils.frPyObjects(segm, h, w)
    else:
        # Already a compressed RLE; pass through unchanged.
        rle = segm
    return rle
def poly2mask_single(h, w, poly):
    """Rasterize one polygon segmentation into a binary mask of shape (h, w)."""
    # TODO: write test for poly2mask, using mask2poly convert mask to poly',
    # compare poly with poly'; visualize the mask
    merged_rle = maskUtils.merge(maskUtils.frPyObjects(poly, h, w))
    return maskUtils.decode(merged_rle)
def annToRLE(ann, height, width):
    """
    Convert annotation which can be polygons, uncompressed RLE to RLE.
    :return: binary mask (numpy 2D array)
    """
    segm = ann['segmentation']
    if not isinstance(segm, list):
        if isinstance(segm['counts'], list):
            # uncompressed RLE -> compressed RLE
            return maskUtils.frPyObjects(segm, height, width)
        # already compressed RLE
        return ann['segmentation']
    # polygon -- a single object might consist of multiple parts;
    # merge all parts into one mask rle code
    return maskUtils.merge(maskUtils.frPyObjects(segm, height, width))
def annToRLE(self, ann):
    """Convert an annotation's segmentation (polygons or uncompressed RLE) to RLE.

    Looks up the annotated image in ``self.images`` to obtain its size.

    Args:
        ann: Annotation record exposing ``image_id`` and ``segmentation``
            attributes.

    Returns:
        dict: Compressed RLE for the annotation's mask.
    """
    im = self.images.xs(ann.image_id)
    h, w = im.height, im.width
    segm = ann.segmentation
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts;
        # merge all parts into one mask rle code
        rles = maskUtils.frPyObjects(segm, h, w)
        rle = maskUtils.merge(rles)
    elif isinstance(segm['counts'], list):
        # uncompressed RLE -> compressed RLE
        rle = maskUtils.frPyObjects(segm, h, w)
    else:
        # Already RLE. Reuse the attribute-bound `segm` instead of
        # re-reading via ann['segmentation'] -- subscript access fails for
        # plain objects that only expose the attribute.
        rle = segm
    return rle
def poly2mask_single(h, w, poly):
    """Rasterize one polygon segmentation into a binary (h, w) mask.

    Args:
        h (int): Image height.
        w (int): Image width.
        poly: Polygon(s) in COCO format (flat ``[x0, y0, x1, y1, ...]`` lists).

    Returns:
        Binary mask decoded from the merged RLE of all polygon parts.
    """
    # TODO: write test for poly2mask, using mask2poly convert mask to poly',
    # compare poly with poly'; visualize the mask
    rles = maskUtils.frPyObjects(poly, h, w)
    rle = maskUtils.merge(rles)
    mask = maskUtils.decode(rle)
    return mask
def __getitem__(self, index):
    # Load image `<id>.jpg`, build a per-pixel class-label map from the
    # instance polygons, then take a random crop of the configured patch size.
    # Returns (img, target): img is a CHW float tensor, target a (H, W)
    # LongTensor of class indices (0 = background).
    img_file = os.path.join(self._img_dir, str(self._infos.data[index].id) + '.jpg')
    img = transforms.ToTensor()(Image.open(img_file).convert('RGB'))
    img_w, img_h = img.size(2), img.size(1)
    # Label map initialized to 0 (background).
    target = torch.LongTensor(img_h, img_w).zero_()
    for inst in self._infos.data[index].insts:
        polys = []
        # { bg, person, bicycle, car, motorcycle, truck, bus, train }
        if self._n_class == 7:
            # Remap category 8 -> 5; NOTE(review): this mutates the shared
            # dataset record in place (idempotent, but a side effect).
            if inst.category_idx == 8:
                inst.category_idx = 5
            if inst.category_idx <= 6:
                for poly in inst.seg:
                    polys.append(poly.tolist())
        # { bg, person }
        elif self._n_class == 2:
            if inst.category_idx == 1:
                for poly in inst.seg:
                    polys.append(poly.tolist())
        if polys:
            # Merge all polygon parts of this instance into one binary mask,
            # then stamp the class index onto the label map.
            rles = maskUtils.frPyObjects(polys, img_h, img_w)
            rle = maskUtils.merge(rles)
            mask = maskUtils.decode(rle)
            target.masked_fill_(torch.from_numpy(mask), inst.category_idx)
    # Random crop of patchSize; assumes patch fits inside the image
    # (img_w >= p_w and img_h >= p_h) -- TODO confirm upstream guarantee.
    p_w = self._infos.patchSize.w
    p_h = self._infos.patchSize.h
    x0 = random.randint(0, (img_w - p_w))
    y0 = random.randint(0, (img_h - p_h))
    img = img[:, y0:y0+p_h, x0:x0+p_w]
    target = target[y0:y0+p_h, x0:x0+p_w]
    return img, target
def main():
    """Visualize 'wires' annotations from a COCO json.

    For every image containing the category, draws each polygon vertex as a
    colored dot (red = normal, yellow = crowd) on a grayscale copy of the
    image and writes the result into ``outputfile``.
    """
    inputfile = '/home/qinjian/Segmentation/地理遥感图像分割/aicrowd房屋分割竞赛/val/images'
    jsonfile = '/home/qinjian/Segmentation/地理遥感图像分割/aicrowd房屋分割竞赛/val/annotation-small.json'
    outputfile = '/home/qinjian/Segmentation/地理遥感图像分割/aicrowd房屋分割竞赛/val/show'
    mkdir_os(outputfile)
    coco = COCO(jsonfile)
    # Restrict to the 'wires' category and all images that contain it.
    catIds = coco.getCatIds(catNms=['wires'])
    imgIds = coco.getImgIds(catIds=catIds)
    for i in range(len(imgIds)):
        if i % 100 == 0:
            print(i, "/", len(imgIds))
        img = coco.loadImgs(imgIds[i])[0]
        # Convert to 3-channel grayscale so the colored overlay stands out.
        cvImage = cv2.imread(os.path.join(inputfile, img['file_name']), -1)
        cvImage = cv2.cvtColor(cvImage, cv2.COLOR_BGR2GRAY)
        cvImage = cv2.cvtColor(cvImage, cv2.COLOR_GRAY2BGR)
        annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
        anns = coco.loadAnns(annIds)
        polygons = []
        color = []
        for ann in anns:
            if 'segmentation' in ann:
                if isinstance(ann['segmentation'], list):
                    # Polygon segmentation: one color entry per appended
                    # polygon so polygons[k] and color[k] stay paired below.
                    for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((int(len(seg) / 2), 2))
                        poly_list = poly.tolist()
                        polygons.append(poly_list)
                        if ann['iscrowd'] == 0:
                            color.append([0, 0, 255])
                        if ann['iscrowd'] == 1:
                            color.append([0, 255, 255])
                else:
                    # NOTE(review): exit() aborts the whole program here, so
                    # the RLE branch below never runs -- RLE ('crowd')
                    # segmentations are effectively unsupported.
                    exit()
                    print("-------------")
                    # mask
                    t = imgIds[ann['image_id']]
                    if isinstance(ann['segmentation']['counts'], list):
                        rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                    else:
                        rle = [ann['segmentation']]
                    m = maskUtils.decode(rle)
                    if ann['iscrowd'] == 0:
                        color_mask = np.array([0, 0, 255])
                    if ann['iscrowd'] == 1:
                        color_mask = np.array([0, 255, 255])
                    # np.bool was removed in NumPy 1.24; use builtin bool.
                    mask = m.astype(bool)
                    cvImage[mask] = cvImage[mask] * 0.7 + color_mask * 0.3
        point_size = 2
        thickness = 2
        # Draw every polygon vertex as a small circle in its paired color.
        for key in range(len(polygons)):
            ndata = polygons[key]
            cur_color = color[key]
            for k in range(len(ndata)):
                data = ndata[k]
                cv2.circle(cvImage, (int(data[0]), int(data[1])), point_size,
                           (cur_color[0], cur_color[1], cur_color[2]), thickness)
        cv2.imwrite(os.path.join(outputfile, img['file_name']), cvImage)