def showAnns(self, anns):
    """
    Display the specified annotations on the current matplotlib axes.

    :param anns (array of object): annotations to display
    :return: None (0 when anns is empty)
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    else:
        # Previously datasetType stayed unbound here and the code below
        # died with a NameError; fail with an explicit message instead.
        raise Exception('datasetType not supported')
    if datasetType == 'instances':
        ax = plt.gca()
        ax.set_autoscale_on(False)
        polygons = []
        color = []
        for ann in anns:
            # random color, biased bright so the overlay stays visible
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon: each part is a flat [x0, y0, x1, y1, ...] list
                for seg in ann['segmentation']:
                    # // keeps the reshape argument an int; true division
                    # returns a float and raises on Python 3.
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly))
                    color.append(c)
            else:
                # mask (RLE, compressed or uncompressed)
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
            if 'keypoints' in ann and type(ann['keypoints']) == list:
                # turn skeleton into zero-based index
                sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                kp = np.array(ann['keypoints'])
                x = kp[0::3]
                y = kp[1::3]
                v = kp[2::3]
                for sk in sks:
                    if np.all(v[sk] > 0):
                        plt.plot(x[sk], y[sk], linewidth=3, color=c)
                # v == 1: labeled but not visible; v == 2: labeled and visible
                plt.plot(x[v == 1], y[v == 1], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor='k', markeredgewidth=2)
                plt.plot(x[v == 2], y[v == 2], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
        ax.add_collection(p)
        p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
        ax.add_collection(p)
    elif datasetType == 'captions':
        for ann in anns:
            # print(x) with a single argument is equivalent on Python 2/3;
            # the statement form only parses on Python 2.
            print(ann['caption'])
def showAnns(self, anns):
    """
    Display the specified annotations.

    :param anns (array of object): annotations to display
    :return: for caption datasets, a list of the first 5 captions
             (padded with None); otherwise None (0 when anns is empty)
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    else:
        # Previously datasetType stayed unbound here and the code below
        # died with a NameError; fail with an explicit message instead.
        raise Exception('datasetType not supported')
    if datasetType == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon: each part is a flat [x0, y0, x1, y1, ...] list
                for seg in ann['segmentation']:
                    # // keeps the reshape argument an int; true division
                    # returns a float and raises on Python 3.
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, closed=True, alpha=0.4))
                    color.append(c)
            else:
                # mask (RLE, compressed or uncompressed)
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1),
                            linewidths=3, alpha=0.4)
        ax.add_collection(p)
    elif datasetType == 'captions':
        n = 0
        cap = [None] * 5  # keep at most the first five captions
        for ann in anns:
            if n < 5:
                cap[n] = ann['caption']
            n = n + 1
        # print(x) with a single argument is equivalent on Python 2/3;
        # the statement form only parses on Python 2.
        print(n)
        print(cap)
        return cap
def annToMask(self, ann):
    """
    Convert an annotation (polygons, uncompressed RLE, or RLE) into a
    binary mask.

    :param ann: annotation object to convert
    :return: binary mask (numpy 2D array)
    """
    # Normalize to RLE first, then decode the RLE into a 2D mask.
    return maskUtils.decode(self.annToRLE(ann))
def showModalInstance(self, anns, k):
    """
    Display the k-th instance: draw its visible (modal) mask in green.

    :param anns: a single annotation dict containing a 'regions' list
    :param k: the depth order of the instance, 1-indexed
    :return: None (0 when anns is of the wrong type)
    """
    if type(anns) == list:
        print("ann cannot be a list! Should be a dict")
        return 0
    ax = plt.gca()
    # Original drew a random color and immediately overwrote it with
    # green; the dead np.random call only perturbed the global RNG state.
    c = [0.0, 1.0, 0.0]  # green
    ann = anns['regions'][k - 1]
    polygons = []
    color = []

    def _overlay_rle(rle_obj):
        # Decode a single RLE object and alpha-blend a solid green mask.
        mm = mask.decode([rle_obj])
        img = np.ones((mm.shape[0], mm.shape[1], 3))
        for i in range(3):
            img[:, :, i] = c[i]
        ax.imshow(np.dstack((img, mm * 0.6)))

    if 'visible_mask' in ann:
        # draw whole visible mask
        _overlay_rle(ann['visible_mask'])
    else:
        if type(ann['segmentation']) == list:
            # polygon: flat [x0, y0, x1, y1, ...] coordinate list
            seg = ann['segmentation']
            poly = np.array(seg).reshape((len(seg) // 2, 2))
            polygons.append(Polygon(poly, closed=True, alpha=0.2))
            color.append(c)
        else:
            # mask (RLE)
            _overlay_rle(ann['segmentation'])
    p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1),
                        linewidths=3, alpha=0.4)
    ax.add_collection(p)
def showAnns(self, anns):
    """
    Display the specified annotations.

    :param anns (array of object): annotations to display
    :return: for caption datasets, a list of the first 5 captions
             (padded with None); otherwise None (0 when anns is empty)
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    else:
        # Fail loudly rather than hitting a NameError on the unbound
        # datasetType below.
        raise Exception('datasetType not supported')
    if datasetType == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon: each part is a flat [x0, y0, x1, y1, ...] list
                for seg in ann['segmentation']:
                    # // keeps the reshape argument an int; true division
                    # returns a float and raises on Python 3.
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, closed=True, alpha=0.4))
                    color.append(c)
            else:
                # mask (RLE, compressed or uncompressed)
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1),
                            linewidths=3, alpha=0.4)
        ax.add_collection(p)
    elif datasetType == 'captions':
        n = 0
        cap = [None] * 5  # keep at most the first five captions
        for ann in anns:
            if n < 5:
                cap[n] = ann['caption']
            n = n + 1
        # print(x) with a single argument is equivalent on Python 2/3;
        # the statement form only parses on Python 2.
        print(n)
        print(cap)
        return cap
def drawSegmentation(image, anns, img):
    """
    Draw every annotation's segmentation onto `image` in place, painting
    each region with the VOC pixel value mapped from its MSCOCO category.

    :param image: image (numpy array) painted in place
    :param anns (array of object): annotations to draw
    :param img: unused; kept for interface compatibility
    :return: True on success, False when there is nothing worth drawing
    :raises KeyError: when a category id is missing from classes.txt
    """
    if len(anns) == 0:
        return False
    if 'segmentation' in anns[0]:
        # sort annotations from biggest to smallest to avoid occlusions
        anns.sort(key=lambda x: x['area'], reverse=True)
        if anns[-1]['area'] < 200:
            return False
        # Parse the MSCOCO -> VOC class mapping once, instead of
        # re-opening and re-scanning classes.txt for every annotation.
        # setdefault keeps the FIRST mapping for a duplicated id, matching
        # the original first-match-then-break scan.
        voc_class = {}
        with open('classes.txt', 'r') as f:
            for line in f:
                splt = line.split('\t')
                voc_class.setdefault(int(splt[0]), int(splt[1]))
        for ann in anns:
            # Original left pixelvalue unbound (or stale from the previous
            # annotation) on a missing id; KeyError is explicit instead.
            pixelvalue = voc_class[ann['category_id']]
            c = [pixelvalue, pixelvalue, pixelvalue]
            if type(ann['segmentation']) == list:
                # polygon: each part is a flat [x0, y0, x1, y1, ...] list
                for seg in ann['segmentation']:
                    # // keeps the reshape argument integral on Python 3
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    pts = np.array(poly, np.int32)
                    # reshape returns a new array; the original discarded it
                    pts = pts.reshape((-1, 1, 2))
                    cv2.polylines(image, [pts], True, (255, 255, 255), 3)
                    cv2.fillPoly(image, [pts], c)
            else:
                # mask (RLE, compressed or uncompressed)
                t = coco.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                # Vectorized replacement of the original per-pixel double
                # loop: paint c wherever the decoded (h, w, 1) mask is set.
                image[m[:, :, 0] != 0] = c
    return True
def annToMask(segm, h, w):
    """
    Convert a segmentation (polygon list, uncompressed RLE, or RLE)
    into a binary mask.

    :param segm: polygon list or RLE dict
    :param h: image height
    :param w: image width
    :return: binary mask (numpy 2D array)
    """
    if type(segm) == list:
        # polygon -- a single object might consist of multiple parts;
        # convert each part to RLE and merge them into one encoding
        rle = maskUtils.merge(maskUtils.frPyObjects(segm, h, w))
    elif type(segm['counts']) == list:
        # uncompressed RLE
        rle = maskUtils.frPyObjects(segm, h, w)
    else:
        # already-compressed RLE
        rle = segm
    return maskUtils.decode(rle)
def showMask(self, M, ax, c=(0, 1, 0)):
    """
    Overlay the decoded mask M on axes ax in color c, together with the
    mask's boundary.

    :param M: a single RLE-encoded mask object
    :param ax: matplotlib axes to draw on
    :param c: RGB color triple (default green); tuple avoids the
              mutable-default-argument pitfall of the original
    :return: None
    """
    m = mask.decode([M])
    img = np.ones((m.shape[0], m.shape[1], 3))
    # decode([M]) may return (h, w, 1); flatten to 2D for the comparisons
    mm = m[:, :, 0] if m.ndim == 3 else m
    # Boundary detection, vectorized: a pixel is on the boundary when it
    # differs from its right, lower, or lower-right neighbour, and both
    # pixels of each differing pair are marked. This is the same rule as
    # the original per-pixel double loop, in a handful of numpy passes.
    B = np.zeros((m.shape[0], m.shape[1]))
    d = mm[:-1, :-1] != mm[:-1, 1:]   # right neighbour
    B[:-1, :-1][d] = 1
    B[:-1, 1:][d] = 1
    d = mm[:-1, :-1] != mm[1:, :-1]   # lower neighbour
    B[:-1, :-1][d] = 1
    B[1:, :-1][d] = 1
    d = mm[:-1, :-1] != mm[1:, 1:]    # lower-right neighbour
    B[:-1, :-1][d] = 1
    B[1:, 1:][d] = 1
    for i in range(3):
        img[:, :, i] = c[i]
    ax.imshow(np.dstack((img, B * 1)))
    ax.imshow(np.dstack((img, m * 0.3)))
def showAnns(self, anns):
    """
    Display the specified annotations: instance masks/keypoints,
    detection boxes, or captions, depending on the annotation fields.

    :param anns (array of object): annotations to display
    :return: None (0 when anns is empty)
    :raises Exception: when the annotation type cannot be determined
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
        if 'bbox' in anns[0]:
            datasetType = 'detections'
        else:
            datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    else:
        raise Exception("datasetType not supported")
    if datasetType == 'instances':
        ax = plt.gca()
        ax.set_autoscale_on(False)
        polygons = []
        color = []
        for ann in anns:
            # random color, biased bright so the overlay stays visible
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            if 'segmentation' in ann:
                if type(ann['segmentation']) == list:
                    # polygon: each part is a flat [x0, y0, ...] list
                    for seg in ann['segmentation']:
                        # // keeps the reshape argument an int; true
                        # division returns a float and raises on Python 3.
                        poly = np.array(seg).reshape((len(seg) // 2, 2))
                        polygons.append(Polygon(poly))
                        color.append(c)
                else:
                    # mask (RLE, compressed or uncompressed)
                    t = self.imgs[ann['image_id']]
                    if type(ann['segmentation']['counts']) == list:
                        rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                    else:
                        rle = [ann['segmentation']]
                    m = mask.decode(rle)
                    img = np.ones((m.shape[0], m.shape[1], 3))
                    if ann['iscrowd'] == 1:
                        color_mask = np.array([2.0, 166.0, 101.0]) / 255
                    if ann['iscrowd'] == 0:
                        color_mask = np.random.random((1, 3)).tolist()[0]
                    for i in range(3):
                        img[:, :, i] = color_mask[i]
                    ax.imshow(np.dstack((img, m * 0.5)))
            if 'keypoints' in ann and type(ann['keypoints']) == list:
                # turn skeleton into zero-based index
                sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                kp = np.array(ann['keypoints'])
                x = kp[0::3]
                y = kp[1::3]
                v = kp[2::3]
                for sk in sks:
                    if np.all(v[sk] > 0):
                        plt.plot(x[sk], y[sk], linewidth=3, color=c)
                # v > 0: any labeled keypoint; v > 1: visible keypoint
                plt.plot(x[v > 0], y[v > 0], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor='k', markeredgewidth=2)
                plt.plot(x[v > 1], y[v > 1], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
        ax.add_collection(p)
        p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
        ax.add_collection(p)
    elif datasetType == 'detections':
        ax = plt.gca()
        # 91 MSCOCO category ids, one fixed hue per id
        colors = plt.cm.hsv(np.linspace(0, 1, 91)).tolist()
        for ann in anns:
            cat_id = ann['category_id']
            color = colors[cat_id]
            bbox = ann['bbox']
            coords = (bbox[0], bbox[1]), bbox[2], bbox[3]
            ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=3))
            name = 'Unknown'
            for cat in self.dataset['categories']:
                if ann['category_id'] == cat['id']:
                    name = cat['name']
            if 'score' in ann:
                score = ann['score']
                display_text = '%s: %.2f' % (name, score)
            else:
                display_text = name
            ax.text(bbox[0], bbox[1], display_text,
                    bbox={'facecolor': color, 'alpha': 0.5})
    elif datasetType == 'captions':
        for ann in anns:
            # print(x) with a single argument is equivalent on Python 2/3;
            # the statement form only parses on Python 2.
            print(ann['caption'])
im_categories = categories[annotation_ii_to_category_ii] misc.imsave(output_images + '/image_' + str(i) + '_999.jpg', im) training_image_category_id.append(im_category_id) training_image_category_name.append(im_categories['supercategory']) temp_content_ids = [] temp_content_names = [] num_annotations = len(annotation_seg) if num_annotations > 0: for s in range(0, num_annotations): # poly = np.array(s).reshape((len(s)/2, 2)) # polygons.append(Polygon(poly)) # if type(s['counts']) == list: rle = mask.frPyObjects([annotation_seg[s]], im_h, im_w) # else: # rle = [s] m = mask.decode(rle) if len(im.shape) == 2: im = im[:, :, None] im = np.tile(im, [1, 1, 3]) m_im = np.tile(m, [1, 1, 3]) masked_im = np.multiply(im, m_im) misc.imsave( output_images + '/image_' + str(training_image_category_id[0]) + '_' + str(i) + '_' + str(s) + '.jpg', masked_im) temp_content_ids.append(im_categories['id']) temp_content_names.append(im_categories['name']) training_content_category_id.append(temp_content_ids) training_content_category_name.append(temp_content_names) np.savez(home_dir + '/coco_processed_labels', training_image_category_id=training_image_category_id,