# Imports assumed by this snippet: `labels`, `id2label`, and `Instance` come from the
# Cityscapes helper scripts; `cv2_util.findContours` is assumed to be a small wrapper
# around cv2.findContours that hides the OpenCV 3/4 return-value difference.
import os
import sys

import cv2
import numpy as np
from PIL import Image


def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

                instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
def _findContours(self):
    # Extract polygon contours from a single binary mask and return them as
    # flat [x0, y0, x1, y1, ...] coordinate lists.
    contours = []
    mask = self.mask.detach().numpy()
    mask = cv2.UMat(mask)
    contour, hierarchy = cv2_util.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
    )

    reshaped_contour = []
    for entity in contour:
        assert len(entity.shape) == 3
        assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
        reshaped_contour.append(entity.reshape(-1).tolist())
    contours.append(reshaped_contour)
    return contours
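# The `cv2_util.findContours` helper used by the two functions above is not shown in
# this file. A minimal sketch of such a compatibility wrapper is given below, assuming
# its only job is to normalize the cv2.findContours return value: OpenCV 3.x returns
# (image, contours, hierarchy) while OpenCV 4.x returns (contours, hierarchy). The
# actual utility module may differ in detail.
import cv2


def findContours(*args, **kwargs):
    """Wrapper for cv2.findContours that always returns (contours, hierarchy)."""
    if cv2.__version__.startswith('4'):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif cv2.__version__.startswith('3'):
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError(
            'cv2 must be either version 3 or 4 to call this method')
    return contours, hierarchy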
def overlay_masks(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
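# Standalone sketch of the same overlay technique used by overlay_masks above, written
# against plain OpenCV only (no BoxList or predictor class assumed): find the contours
# of a binary mask and draw them onto an image. The dummy image and mask are placeholders.
import cv2
import numpy as np

image = np.zeros((100, 100, 3), dtype=np.uint8)   # dummy BGR image
mask = np.zeros((100, 100), dtype=np.uint8)       # dummy binary mask
cv2.circle(mask, (50, 50), 20, 1, -1)             # a filled disc as the "instance"

# OpenCV 4 returns (contours, hierarchy); OpenCV 3 returns (image, contours, hierarchy).
found = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]

image = cv2.drawContours(image, contours, -1, (0, 255, 0), 3)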
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                # contours = cv2.findContours(image=mask.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
                # _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                contours, hierarchy = findContours(
                    mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                segmentation = []
                for contour in contours:
                    # Valid polygons have >= 6 coordinates (3 points)
                    if contour.size >= 6:
                        segmentation.append(contour.flatten().tolist())
                instanceObj_dict['contours'] = segmentation

                instances[id2label[instanceObj.labelID].name].append(
                    instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    # print(instanceDict)
    return instanceDict
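# Hypothetical usage sketch for instances2dict_with_polygons. The file name below is a
# placeholder; the function expects Cityscapes-style *instanceIds.png label images, in
# which pixel values >= 1000 encode individual object instances (values below 1000 are
# skipped by the loop above).
if __name__ == "__main__":
    files = ["aachen_000000_000019_gtFine_instanceIds.png"]  # placeholder path
    result = instances2dict_with_polygons(files, verbose=True)
    for image_path, per_label in result.items():
        for label_name, objs in per_label.items():
            for obj in objs:
                print(image_path, label_name, len(obj['contours']), "polygon(s)")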