# Required imports (from the surrounding module):
from collections import defaultdict
import numpy as np
from scipy.signal import convolve2d
from skimage.graph import MCP_Connect
from skimage.measure import label as skimage_label
from skimage.morphology import skeletonize
from sklearn.metrics.pairwise import euclidean_distances


def find_lines(lines_mask: np.ndarray) -> list:
    """
    Finds the longest central line for each connected component in the given binary mask.

    :param lines_mask: Binary mask of the detected line-areas
    :return: a list of OpenCV-style polygonal lines (each contour encoded as [N,1,2] elements,
             where each tuple is (x,y))
    """
    # Make sure one-pixel wide 8-connected mask
    lines_mask = skeletonize(lines_mask)

    class MakeLineMCP(MCP_Connect):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.connections = dict()
            self.scores = defaultdict(lambda: np.inf)

        def create_connection(self, id1, id2, pos1, pos2, cost1, cost2):
            k = (min(id1, id2), max(id1, id2))
            s = cost1 + cost2
            if self.scores[k] > s:
                self.connections[k] = (pos1, pos2, s)
                self.scores[k] = s

        def get_connections(self, subsample=5):
            results = dict()
            for k, (pos1, pos2, s) in self.connections.items():
                path = np.concatenate([self.traceback(pos1), self.traceback(pos2)[::-1]])
                results[k] = path[::subsample]
            return results

        def goal_reached(self, int_index, float_cumcost):
            if float_cumcost > 0:
                return 2
            else:
                return 0

    if np.sum(lines_mask) == 0:
        return []

    # Find extremity points (skeleton pixels with exactly one neighbour in the 3x3 window)
    end_points_candidates = np.stack(np.where((convolve2d(lines_mask, np.ones((3, 3)), mode='same') == 2)
                                              & lines_mask)).T
    connected_components = skimage_label(lines_mask, connectivity=2)

    # Group endpoints by connected component and keep only the two points furthest apart
    d = defaultdict(list)
    for pt in end_points_candidates:
        d[connected_components[pt[0], pt[1]]].append(pt)
    end_points = []
    for pts in d.values():
        distances = euclidean_distances(np.stack(pts), np.stack(pts))
        i, j = np.unravel_index(distances.argmax(), distances.shape)
        end_points.append(pts[i])
        end_points.append(pts[j])
    end_points = np.stack(end_points)

    # Trace the minimum-cost path between the endpoints of each component
    mcp = MakeLineMCP(~lines_mask)
    mcp.find_costs(end_points)
    connections = mcp.get_connections()
    if not np.all(np.array(sorted([i for k in connections.keys() for i in k])) == np.arange(len(end_points))):
        print('Warning: find_lines seems weird')
    return [c[:, None, ::-1] for c in connections.values()]
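
# A minimal usage sketch for find_lines (illustrative only): the tiny diagonal
# mask below is made up, and find_lines plus the imports above are assumed to be in scope.
import numpy as np

mask = np.zeros((20, 20), dtype=bool)
for t in range(15):
    mask[3 + t // 2, 2 + t] = True   # one thin, 8-connected diagonal stroke

for polyline in find_lines(mask):
    # each entry is an OpenCV-style [N, 1, 2] array of (x, y) points
    print(polyline.shape, polyline[0, 0], polyline[-1, 0])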
def BoundingBox(self, I, knot=None):
    ''' Compute the bounding box of the non-zero data in I.
        If knot is given, only the connected component containing that (row, col) position is used. '''
    # requires: from skimage.measure import label as skimage_label, regionprops
    if knot is not None:
        label = skimage_label(I, connectivity=2)
        I = (label == label[knot[0], knot[1]]).astype(int)
    rp = regionprops(I)
    # regionprops bbox is (min_row, min_col, max_row, max_col)
    [xl, yl, xr, yr] = [int(b) for b in rp[0].bbox]
    # pad the box by one pixel on each side
    self.xl = xl - 1
    self.yl = yl - 1
    self.xr = xr + 1
    self.yr = yr + 1
    return [self.xl, self.xr, self.yl, self.yr]
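
# For reference, a quick illustration of the regionprops bbox convention that
# BoundingBox relies on (the toy array is made up): bbox is
# (min_row, min_col, max_row, max_col) with exclusive maxima, which the method
# then pads by one pixel and returns as [xl, xr, yl, yr].
import numpy as np
from skimage.measure import regionprops

I = np.zeros((10, 10), dtype=int)
I[2:5, 3:7] = 1                      # non-zero block over rows 2-4, cols 3-6
print(regionprops(I)[0].bbox)        # (2, 3, 5, 7)
# BoundingBox would return [1, 6, 2, 8] for this input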
def prediction_to_keypoints(predictions, min_blob_size=None, prediction_threshold=0.9):
    """Convert a prediction map to keypoints.

    Threshold the prediction, find connected components, and merge neighbouring components.
    Idea: split a blob if several of its parts are > min_blob_size.

    :return: keypoints, categories (both as np.arrays)
    """
    # requires: import numpy as np; from skimage.measure import label as skimage_label
    if min_blob_size is None:
        min_blob_size = 160 * config.scale ** 2  # config.scale comes from the surrounding module

    # thresholding -> 0, 1
    predictions[:, :, 1:] = (predictions[:, :, 1:] >= prediction_threshold).astype(np.float64)
    prediction_argmax = np.argmax(predictions, axis=2)

    # find blobs in non-background prediction pixels (any keypoint category)
    blobs, num_blobs = skimage_label(prediction_argmax > 0, return_num=True, connectivity=2)

    points = []
    points_categories = []
    blob_sizes = []
    # create a keypoint from the pixels of each sufficiently large blob
    for blob in range(1, num_blobs + 1):
        blob_indices = np.argwhere(blobs == blob)
        if len(blob_indices) < min_blob_size:
            continue
        cats, support = np.unique(prediction_argmax[blobs == blob], return_counts=True)
        blob_sizes.append(len(blob_indices))
        center = np.mean(blob_indices, axis=0).astype(int)
        winning_category = cats[np.argmax(support)]
        points.append(center)
        points_categories.append(winning_category)

    if len(points) > 0:
        points = np.flip(points, axis=1)  # yx -> xy
    else:
        # fix: an empty list does not have dimensions like (n, 2)
        points = np.zeros((0, 2), dtype=int)
    points_categories = np.array(points_categories)
    # print('blob size:', np.mean(blob_sizes).astype(int))
    return points, points_categories  # , blob_sizes
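
# A small usage sketch (illustrative only): the channel layout assumed here is
# channel 0 = background score, channels 1+ = keypoint classes, as implied by the
# function above; min_blob_size is passed explicitly to avoid the local config dependency.
import numpy as np

pred = np.zeros((64, 64, 3), dtype=np.float64)
pred[:, :, 0] = 0.5                      # background score everywhere
pred[20:30, 20:30, 1] = 0.95             # a 10x10 blob of class 1
pred[40:50, 10:20, 2] = 0.92             # a 10x10 blob of class 2

pts, cats = prediction_to_keypoints(pred, min_blob_size=50)
print(pts)    # (x, y) blob centers, e.g. [[24 24], [14 44]]
print(cats)   # winning category per blob, e.g. [1 2]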
def image2bb(img, thre=128):
    ''' image2bb(img, thre): convert a prediction heat-map image into bounding boxes.
        img:  prediction heat-map image
        thre: threshold used to binarize the heat-map
        Return: list of bounding boxes, each as [x0, y0, x1, y1] '''
    # requires: import numpy; from skimage.measure import label as skimage_label
    lbimg = skimage_label(numpy.uint8(img > thre), background=0)
    bbox = []
    # label 0 is the background, so start from 1
    for l in range(1, lbimg.max() + 1):
        x, y = numpy.where(lbimg == l)
        x0 = min(x)
        y0 = min(y)
        x1 = max(x)
        y1 = max(y)
        bb = [x0, y0, x1, y1]
        bbox.append(bb)
    return bbox
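
# A short usage sketch for image2bb; the toy heat-map values and shapes are illustrative.
import numpy
from skimage.measure import label as skimage_label

heat = numpy.zeros((60, 60), dtype=numpy.uint8)
heat[5:15, 10:30] = 200
heat[40:50, 40:55] = 255

for x0, y0, x1, y1 in image2bb(heat, thre=128):
    # coordinates are (row, col) min/max of each blob, inclusive
    print(x0, y0, x1, y1)   # e.g. 5 10 14 29 and 40 40 49 54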
def apply(self, binary: np.array, *args, **kwargs) -> List[geometry.base.BaseGeometry]:
    # requires: from typing import List; from shapely import geometry;
    # plus the same imports as find_lines above
    # Make sure one-pixel wide 8-connected mask
    lines_mask = skeletonize(binary)

    class MakeLineMCP(MCP_Connect):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.connections = dict()
            self.scores = defaultdict(lambda: np.inf)

        def create_connection(self, id1, id2, pos1, pos2, cost1, cost2):
            k = (min(id1, id2), max(id1, id2))
            s = cost1 + cost2
            if self.scores[k] > s:
                self.connections[k] = (pos1, pos2, s)
                self.scores[k] = s

        def get_connections(self, subsample=5):
            results = dict()
            for k, (pos1, pos2, s) in self.connections.items():
                path = np.concatenate(
                    [self.traceback(pos1), self.traceback(pos2)[::-1]])
                results[k] = path[::subsample]
            return results

        def goal_reached(self, int_index, float_cumcost):
            if float_cumcost > 0:
                return 2
            else:
                return 0

    if np.sum(lines_mask) == 0:
        return []

    # Find extremity points (skeleton pixels with exactly one neighbour in the 3x3 window)
    end_points_candidates = np.stack(
        np.where((convolve2d(lines_mask, np.ones((3, 3)), mode="same") == 2) & lines_mask)).T
    connected_components = skimage_label(lines_mask, connectivity=2)

    # Group endpoints by connected component and keep only the two points furthest apart
    d = defaultdict(list)
    for pt in end_points_candidates:
        d[connected_components[pt[0], pt[1]]].append(pt)
    end_points = []
    for pts in d.values():
        distances = euclidean_distances(np.stack(pts), np.stack(pts))
        i, j = np.unravel_index(distances.argmax(), distances.shape)
        end_points.append(pts[i])
        end_points.append(pts[j])
    end_points = np.stack(end_points)

    # Trace the minimum-cost path between the endpoints of each component
    mcp = MakeLineMCP(~lines_mask)
    mcp.find_costs(end_points)
    connections = mcp.get_connections()
    if not np.all(
            np.array(sorted([i for k in connections.keys() for i in k])) == np.arange(len(end_points))):
        print("Warning: find_lines seems weird")
    return [
        geometry.LineString(c[:, ::-1]) for c in connections.values()
        if len(c) >= 2
    ]
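
# The shapely LineStrings returned by apply() can be consumed with the usual
# shapely API; a small sketch with a made-up stand-in for the returned list:
from shapely import geometry

detections = [geometry.LineString([(2, 3), (10, 3), (18, 5)])]   # stand-in for apply() output
for line in detections:
    simplified = line.simplify(2.0)          # Douglas-Peucker simplification
    print(line.length, list(simplified.coords))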
def generateDrops(imagePath, cfg, inputLabel=None):
    """
    Generate raindrops at random positions on the image at imagePath.
    """
    # requires: import random; from random import randint; import numpy as np;
    # from PIL import Image, ImageEnhance; from skimage.measure import label as skimage_label;
    # plus the project-local raindrop module and CheckCollision helper
    maxDrop = cfg["maxDrops"]
    minDrop = cfg["minDrops"]
    drop_num = randint(minDrop, maxDrop)
    maxR = cfg["maxR"]
    minR = cfg["minR"]
    ifReturnLabel = cfg["return_label"]
    edge_ratio = cfg["edge_darkratio"]

    PIL_bg_img = Image.open(imagePath)
    bg_img = np.asarray(PIL_bg_img)
    # to check if collision or not
    label_map = np.zeros_like(bg_img)[:, :, 0]
    imgh, imgw, _ = bg_img.shape

    # random drop positions
    ran_pos = [(int(random.random() * imgw), int(random.random() * imgh)) for _ in range(drop_num)]

    listRainDrops = []
    #########################
    # Create Raindrop
    #########################
    # create raindrops by default
    if inputLabel is None:
        for key, pos in enumerate(ran_pos):
            # label should start from 1
            key = key + 1
            radius = random.randint(minR, maxR)
            drop = raindrop.raindrop(key, pos, radius)
            listRainDrops.append(drop)
    # using input label
    else:
        arrayLabel = np.asarray(inputLabel)
        # get alpha
        condition = (arrayLabel[:, :, 0] > cfg["label_thres"])
        label = np.where(condition, 1, 0)
        label_part, label_nums = skimage_label(label, connectivity=2, return_num=True)
        for idx in range(label_nums):
            # 0 is bg
            i = idx + 1
            label_index = np.argwhere(label_part == i)
            U = np.min(label_index[:, 0])
            D = np.max(label_index[:, 0]) + 1
            L = np.min(label_index[:, 1])
            R = np.max(label_index[:, 1]) + 1
            cur_alpha = arrayLabel[U:D, L:R, 0].copy()
            # cur_alpha[(cur_alpha<=cfg["label_thres"])] = 0
            cur_label = (cur_alpha > cfg["label_thres"]) * 1
            # store left top
            centerxy = (L, U)
            drop = raindrop(idx, centerxy=centerxy, input_alpha=cur_alpha, input_label=cur_label)
            listRainDrops.append(drop)

    #########################
    # Handle Collision
    #########################
    collisionNum = len(listRainDrops)
    listFinalDrops = list(listRainDrops)
    loop = 0
    # only check collisions when using default raindrops
    if inputLabel is None:
        while collisionNum > 0:
            loop = loop + 1
            listFinalDrops = list(listFinalDrops)
            collisionNum = len(listFinalDrops)
            label_map = np.zeros_like(label_map)
            # Check Collision
            for drop in listFinalDrops:
                # check the bounding
                (ix, iy) = drop.getCenters()
                radius = drop.getRadius()
                ROI_WL = 2 * radius
                ROI_WR = 2 * radius
                ROI_HU = 3 * radius
                ROI_HD = 2 * radius
                if (iy - 3 * radius) < 0:
                    ROI_HU = iy
                if (iy + 2 * radius) > imgh:
                    ROI_HD = imgh - iy
                if (ix - 2 * radius) < 0:
                    ROI_WL = ix
                if (ix + 2 * radius) > imgw:
                    ROI_WR = imgw - ix

                # apply raindrop label map to the image's label map
                drop_label = drop.getLabelMap()

                # check if the center already has drops
                if (label_map[iy, ix] > 0):
                    col_ids = np.unique(label_map[iy - ROI_HU:iy + ROI_HD, ix - ROI_WL:ix + ROI_WR])
                    col_ids = col_ids[col_ids != 0]
                    drop.setCollision(True, col_ids)
                    label_map[iy - ROI_HU:iy + ROI_HD, ix - ROI_WL:ix + ROI_WR] = drop_label[
                        3 * radius - ROI_HU:3 * radius + ROI_HD,
                        2 * radius - ROI_WL:2 * radius + ROI_WR] * drop.getKey()
                else:
                    label_map[iy - ROI_HU:iy + ROI_HD, ix - ROI_WL:ix + ROI_WR] = drop_label[
                        3 * radius - ROI_HU:3 * radius + ROI_HD,
                        2 * radius - ROI_WL:2 * radius + ROI_WR] * drop.getKey()
                    # no collision
                    collisionNum = collisionNum - 1

            if collisionNum > 0:
                listFinalDrops = CheckCollision(listFinalDrops)

    # add alpha for the edge of the drops
    alpha_map = np.zeros_like(label_map).astype(np.float64)
    if inputLabel is None:
        for drop in listFinalDrops:
            (ix, iy) = drop.getCenters()
            radius = drop.getRadius()
            ROI_WL = 2 * radius
            ROI_WR = 2 * radius
            ROI_HU = 3 * radius
            ROI_HD = 2 * radius
            if (iy - 3 * radius) < 0:
                ROI_HU = iy
            if (iy + 2 * radius) > imgh:
                ROI_HD = imgh - iy
            if (ix - 2 * radius) < 0:
                ROI_WL = ix
            if (ix + 2 * radius) > imgw:
                ROI_WR = imgw - ix
            drop_alpha = drop.getAlphaMap()
            alpha_map[iy - ROI_HU:iy + ROI_HD, ix - ROI_WL:ix + ROI_WR] += drop_alpha[
                3 * radius - ROI_HU:3 * radius + ROI_HD,
                2 * radius - ROI_WL:2 * radius + ROI_WR]
    else:
        for drop in listFinalDrops:
            (ix, iy) = drop.getCenters()
            drop_alpha = drop.getAlphaMap()
            h, w = drop_alpha.shape
            alpha_map[iy:iy + h, ix:ix + w] += drop_alpha[:h, :w]
        # alpha_map = arrayLabel[:,:,0].copy()

    '''
    alpha_map = pyblur.GaussianBlur(Image.fromarray(np.uint8(alpha_map)), 10)
    alpha_map = np.asarray(alpha_map).astype(np.float)
    alpha_map = alpha_map/np.max(alpha_map)*255.0
    '''
    alpha_map = alpha_map / np.max(alpha_map) * 255.0
    # cv2.imwrite("test.bmp", alpha_map)
    # sys.exit()
    # alpha_map[label<1] = 0

    PIL_bg_img = Image.open(imagePath)
    for drop in listFinalDrops:
        # check bounding
        if inputLabel is None:
            (ix, iy) = drop.getCenters()
            radius = drop.getRadius()
            ROIU = iy - 3 * radius
            ROID = iy + 2 * radius
            ROIL = ix - 2 * radius
            ROIR = ix + 2 * radius
            if (iy - 3 * radius) < 0:
                ROIU = 0
                ROID = 5 * radius
            if (iy + 2 * radius) > imgh:
                ROIU = imgh - 5 * radius
                ROID = imgh
            if (ix - 2 * radius) < 0:
                ROIL = 0
                ROIR = 4 * radius
            if (ix + 2 * radius) > imgw:
                ROIL = imgw - 4 * radius
                ROIR = imgw
        else:
            # left top
            (ix, iy) = drop.getCenters()
            h, w = drop.getLabelMap().shape
            ROIU = iy
            ROID = iy + h
            ROIL = ix
            ROIR = ix + w

        tmp_bg = bg_img[ROIU:ROID, ROIL:ROIR, :]
        drop.updateTexture(tmp_bg)
        tmp_alpha_map = alpha_map[ROIU:ROID, ROIL:ROIR]

        output = drop.getTexture()
        tmp_output = np.asarray(output).astype(float)[:, :, -1]
        tmp_alpha_map = tmp_alpha_map * (tmp_output / 255)
        tmp_alpha_map = Image.fromarray(tmp_alpha_map.astype('uint8'))
        tmp_alpha_map.save("test.bmp")  # debug output

        # darken the drop edge and paste drop onto the background
        edge = ImageEnhance.Brightness(output)
        edge = edge.enhance(edge_ratio)
        if inputLabel is None:
            PIL_bg_img.paste(edge, (ix - 2 * radius, iy - 3 * radius), tmp_alpha_map)
            PIL_bg_img.paste(output, (ix - 2 * radius, iy - 3 * radius), output)
        else:
            PIL_bg_img.paste(edge, (ix, iy), tmp_alpha_map)
            PIL_bg_img.paste(output, (ix, iy), output)

    if ifReturnLabel:
        output_label = np.array(alpha_map)
        output_label.flags.writeable = True
        output_label[output_label > 0] = 1
        output_label = Image.fromarray(output_label.astype('uint8'))
        return PIL_bg_img, output_label

    return PIL_bg_img
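
# A hedged usage sketch: the config keys are taken from the accesses inside
# generateDrops above; the concrete values and the image path are illustrative only.
cfg = {
    "maxDrops": 30,
    "minDrops": 10,
    "maxR": 50,
    "minR": 20,
    "return_label": True,
    "edge_darkratio": 0.3,
    "label_thres": 128,     # only used when inputLabel is given
}

rain_img, rain_label = generateDrops("example.jpg", cfg)
rain_img.save("example_rain.jpg")
rain_label.save("example_rain_label.png")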