def test_approximate_polygon():
    out = approximate_polygon(square, 0.1)
    np.testing.assert_array_equal(out, square[(0, 3, 6, 9, 12), :])

    out = approximate_polygon(square, 2.2)
    np.testing.assert_array_equal(out, square[(0, 6, 12), :])

    out = approximate_polygon(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :], 0.1)
    np.testing.assert_array_equal(out, square[(0, 3, 6, 9, 12), :])

    out = approximate_polygon(square, -1)
    np.testing.assert_array_equal(out, square)
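# The `square` fixture above comes from the surrounding test module and is not
# shown here. A minimal self-contained sketch of the same idea (this fixture is
# an assumption, reconstructed to match the indices the test checks):
import numpy as np
from skimage.measure import approximate_polygon

square = np.array([
    [0, 0], [0, 1], [0, 2], [0, 3],
    [1, 3], [2, 3], [3, 3],
    [3, 2], [3, 1], [3, 0],
    [2, 0], [1, 0], [0, 0],
])
# With a small tolerance only the four corners (plus the closing point) survive.
print(approximate_polygon(square.astype(float), 0.1))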
def poly_fit(self, low_threshold=50, high_threshold=120):
    polys = []
    [h, w] = self.image.shape
    r_pad = self.pad
    labels = measure.label(self.image)  # , connectivity=1)
    self.image = None
    for region in measure.regionprops(labels):
        minr, minc, maxr, maxc = region.bbox
        _r = max(minr - r_pad, 0)
        _c = max(minc - r_pad, 0)
        __r = min(maxr + r_pad, h + 2 * r_pad)
        __c = min(maxc + r_pad, w + 2 * r_pad)
        zone = np.array(labels[_r:__r, _c:__c] == region.label)
        if zone.shape[0] < 3 or zone.shape[1] < 3:
            continue
        contour = find_contours(zone, 0)
        if len(contour) < 1:
            continue
        coords = approximate_polygon(contour[0], tolerance=0.1)
        if len(coords) < 3:
            continue
        poly = Polygon(zip(coords[:, 0] + _r - self.pad,
                           coords[:, 1] + _c - self.pad))
        if poly.area < 10:
            continue
        poly = affine_transform(poly, self.AffMar)
        polys.append(GeosPolygon(list(poly.exterior.coords)))
    self.mp = MultiPolygon(polys)
def guess_corners(bw):
    """
    Infer the corners of an image using a Sobel filter to find the edges and a
    Harris filter to find the corners. Takes only a single color channel.

    Parameters
    ----------
    bw : (m x n) ndarray of ints

    Returns
    -------
    corners : pixel coordinates of plot corners, unsorted
    outline : (m x n) ndarray of bools
        True -> plot area
    """
    assert len(bw.shape) == 2
    bw = img_as_uint(bw)
    e_map = ndimage.sobel(bw)

    markers = np.zeros(bw.shape, dtype=int)
    markers[bw < 30] = 1
    markers[bw > 150] = 2
    seg = ndimage.watershed_ift(e_map, np.asarray(markers, dtype=int))

    outline = ndimage.binary_fill_holes(1 - seg)
    corners = harris(np.asarray(outline, dtype=int))
    corners = approximate_polygon(corners, 1)
    return corners, outline
def binary_mask_to_polygon(binary_mask, tolerance=0):
    """Converts a binary mask to COCO polygon representation

    Args:
        binary_mask: a 2D binary numpy array where '1's represent the object
        tolerance: Maximum distance from original points of polygon to
            approximated polygonal chain. If tolerance is 0, the original
            coordinate array is returned.
    """
    polygons = []
    # pad mask to close contours of shapes which start and end at an edge
    padded_binary_mask = np.pad(binary_mask, pad_width=1,
                                mode='constant', constant_values=0)
    contours = measure.find_contours(padded_binary_mask, 0.5)
    contours = np.subtract(contours, 1)
    for contour in contours:
        contour = close_contour(contour)
        contour = measure.approximate_polygon(contour, tolerance)
        if len(contour) < 3:
            continue
        contour = np.flip(contour, axis=1)
        segmentation = contour.ravel().tolist()
        # after padding and subtracting 1 we may get -0.5 points in our segmentation
        segmentation = [0 if i < 0 else i for i in segmentation]
        polygons.append(segmentation)

    return polygons
def binary_mask_to_polygon(binary_mask, tolerance=0):
    """Converts a binary mask to COCO polygon representation

    :param binary_mask: a 2D binary numpy array where '1's represent the object
    :param tolerance: Maximum distance from original points of polygon to
        approximated polygonal chain. If tolerance is 0, the original
        coordinate array is returned.
    :return: Mask in polygon format
    """
    polygons = []
    # pad mask to close contours of shapes which start and end at an edge
    padded_binary_mask = np.pad(binary_mask, pad_width=1,
                                mode='constant', constant_values=0)
    contours = np.array(measure.find_contours(padded_binary_mask, 0.5))
    # Reverse padding
    contours = contours - 1
    for contour in contours:
        # Make sure contour is closed
        contour = close_contour(contour)
        # Approximate contour by polygon
        polygon = measure.approximate_polygon(contour, tolerance)
        # Skip invalid polygons
        if len(polygon) < 3:
            continue
        # Flip xy to yx point representation
        polygon = np.flip(polygon, axis=1)
        # Flatten
        polygon = polygon.ravel()
        # after padding and subtracting 1 we may get -0.5 points in our segmentation
        polygon[polygon < 0] = 0
        polygons.append(polygon.tolist())

    return polygons
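# Both variants above call a close_contour helper that the excerpts do not
# include. A minimal sketch consistent with how it is used (appending the first
# point when a contour does not end where it starts) could look like this:
import numpy as np

def close_contour(contour):
    # find_contours may return an open chain; close it so it forms a polygon
    if not np.array_equal(contour[0], contour[-1]):
        contour = np.vstack((contour, contour[0]))
    return contour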
def vectorize_regions(im: np.ndarray, threshold: float = 0.5):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (H, W) with the first dimension
                         being a probability distribution over the region.
        threshold (float): Threshold for binarization

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the region polygons.
    """
    bin = im > threshold
    contours = find_contours(bin, 0.5, fully_connected='high',
                             positive_orientation='high')
    if len(contours) == 0:
        return contours
    approx_contours = []
    for contour in contours:
        approx_contours.append(
            approximate_polygon(contour[:, [1, 0]], 1).astype('uint').tolist())
    return approx_contours
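# A small usage sketch for vectorize_regions; the synthetic probability map and
# the numpy/skimage.draw imports are assumptions for illustration, not part of
# the original code:
import numpy as np
from skimage.draw import disk

prob = np.zeros((100, 100))
rr, cc = disk((50, 50), 20)
prob[rr, cc] = 0.9

polygons = vectorize_regions(prob, threshold=0.5)
# one polygon: a list of [x, y] vertices approximating the disk's outline
print(len(polygons), polygons[0][:3])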
def path_tracing(img_filtered, contours):
    storage = []
    for contour in contours:
        coords = approximate_polygon(contour, tolerance=0.1)
        # if isClosed(coords) and polygon_area(coords) > 100:
        if isClosed(coords) and polygon_area(coords) > 100:
            storage.append(coords)
    return storage
def polygonization(file):
    img = img_as_float(io.imread(file, as_gray=True))
    coordinates = []
    for contour in find_contours(img, 0):
        coords = approximate_polygon(contour, tolerance=2.5)
        if len(coords) > 3:
            coordinates.append(coords)
    return coordinates
def get_approx_contour(bw_img):
    plt.imshow(bw_img)
    for contour in find_contours(bw_img, 0):
        coords = approximate_polygon(contour, tolerance=30)
        plt.plot(coords[:, 1], coords[:, 0])
    plt.show()
def findPolygon(image):
    bestContour = None
    for contour in measure.find_contours(image, 0):
        if bestContour is None or len(bestContour) < len(contour):
            bestContour = contour
    return measure.approximate_polygon(
        bestContour, POLYGON_TOLERANCE)[:-1]  # without last point (same as first)
def prepare_result(img: np.ndarray,
                   pred_probs: np.ndarray,
                   clicks: Clicker,
                   gt_mask_file,
                   tolerance: int = 1,
                   view_img: bool = False,
                   filename: str = None):
    """prepare result

    Args:
        img (np.ndarray): img in numpy.ndarray
        pred_probs (np.ndarray): predicted probabilities from model
        clicks (Clicker): Clicker object for click history
        gt_mask_file (FileStorage): ground truth mask file
        tolerance (int, optional): Precision to convert from mask to polygon
            in pixel. Defaults to 1.
        view_img (bool, optional): Return result image url. Defaults to False.
    """
    # gen mask
    assert len(pred_probs) == 1, \
        f'Only one output is expected, but got {len(pred_probs)}'
    pred_probs = pred_probs[0]
    pred_mask = pred_probs > MODEL_THRESH

    # convert mask to polygon
    regions = Mask(pred_mask).polygons().points
    polygons = []
    for polygon in regions:
        polygon2 = measure.approximate_polygon(polygon, tolerance)
        polygons.append(polygon2.tolist())
    results = {'polygons': polygons}

    # calculate iou
    if gt_mask_file:
        gt_mask = Image.open(gt_mask_file)
        mask_np = np.asarray(gt_mask, dtype=np.int32)
        if len(mask_np.shape) > 2:
            assert len(mask_np.shape) == 3
            mask_np = np.max(mask_np, axis=2)
        mask_np = np.where(mask_np > 0, 1, 0)
        iou = utils.get_iou(mask_np, pred_mask)
        results['iou'] = iou
        print(iou)

    # save img with minor delay
    if view_img:
        ext = filename.split('.')[-1]
        draw = vis.draw_with_blend_and_clicks(img, mask=pred_mask,
                                              clicks_list=clicks.clicks_list)
        filename = filename.split('.')[0] + f'[{len(clicks.clicks_list)}].jpg'
        result_path = TEMP_PATH + filename
        Image.fromarray(draw).save(result_path)
        # return send_file(result_path)
        results['result'] = filename

    return results
def _convert_to_segmentation(mask):
    contours = find_contours(mask, 0.5)
    # only one contour exists in our case
    contour = contours[0]
    contour = np.flip(contour, axis=1)
    # Approximate the contour and reduce the number of points
    contour = approximate_polygon(contour, tolerance=2.5)
    segmentation = contour.ravel().tolist()
    return segmentation
def segmentation(self, km, h, w):
    # Extract contour segments from the image
    seg = np.asarray([(1 if i == 1 else 0) for i in km.labels_]).reshape((h, w))
    contours = measure.find_contours(seg, 0.5, fully_connected="high")
    simplified_contours = [
        measure.approximate_polygon(c, tolerance=4) for c in contours
    ]
    return simplified_contours
def __init__(self, binarized_image, tolerance, name):
    self.contours = measure.find_contours(binarized_image, 0.5)
    self.contours = [np.array(contour, dtype=int) for contour in self.contours]
    self.tolerance = tolerance
    self.name = name
    coords_lists = []
    for contour in self.contours:
        coords_lists.append(
            measure.approximate_polygon(contour, tolerance=self.tolerance))
    self.graph = Graph(coords_lists, binarized_image.shape)
def group_objects(self, obj_struct, orig_map):
    neighbor_filter = np.ones([2 * self.group_radius + 1] * 2)
    temp_img = np.zeros(orig_map.shape)
    pixel_list = np.vstack(np.array(obj_struct['pixelList'])).transpose()
    temp_img[pixel_list[0], pixel_list[1]] = 1
    neighbor_cnt = np.real(np.fft.ifft2(
        np.fft.fft2(temp_img) *
        np.fft.fft2(neighbor_filter, temp_img.shape)))  # imfilt
    cnt_th = int(self.group_density * np.prod(neighbor_filter.shape))
    group_map = neighbor_cnt > cnt_th
    group_map = ((group_map + orig_map) > 0).astype(np.int)
    object_lbl = measure.label(group_map)
    rps = measure.regionprops((object_lbl * orig_map).astype(np.int), orig_map)
    object_structure = pd.DataFrame(columns=[
        'iLocation', 'jLocation', 'pixelList', 'confidence', 'area',
        'maxIntensity', 'isCommercial'
    ])
    object_structure.pixelList = object_structure.pixelList.astype(object)
    polygon_list = []
    for rp in rps:
        if rp.area >= self.min_region and rp.max_intensity >= self.thresh_max:
            # get contour info
            _, contours, _ = cv2.findContours(
                rp.convex_image.astype(np.uint8), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_NONE)
            if len(contours) > 0:
                assert len(contours) == 1
                # Douglas-Peucker
                contours = measure.approximate_polygon(
                    np.squeeze(contours[0], axis=1), self.epsilon)
                contours[:, 1] += rp.bbox[0]
                contours[:, 0] += rp.bbox[1]
                polygon_list.append(contours)
            else:
                polygon_list.append(np.array([]))
            temp = [
                *[int(c) for c in rp.centroid], rp.coords, rp.mean_intensity,
                rp.area, rp.max_intensity, 0
            ]
            object_structure = object_structure.append(dict(
                zip([
                    'iLocation', 'jLocation', 'pixelList', 'confidence',
                    'area', 'maxIntensity', 'isCommercial'
                ], temp)), ignore_index=True)
    object_structure['polygon'] = pd.Series(
        polygon_list, index=object_structure.index).astype(object)
    return object_structure
def __init__(self, shape_samples):
    # TODO get the descriptors for the shape samples
    dev = shape_samples.device
    self.gt_descriptors = []
    for shape_sample in shape_samples:
        shape_sample = shape_sample.cpu().numpy()
        contour = find_contours(shape_sample, level=0)[0]
        poly_approx = torch.from_numpy(
            approximate_polygon(contour, tolerance=1.2)).to(dev)
        self.gt_descriptors.append(Polygon2d(poly_approx))
def approx_polygon(new_object):
    """
    Uses the scikit-image approximate_polygon function to approximate polygons
    from a mask of floodfill output

    @param new_object: output mask from floodfill
    """
    contour = find_contours(new_object, 0)[0]
    approx_polygon_coords = approximate_polygon(contour, tolerance=1)
    return approx_polygon_coords
def draw_polygon(lspread, lineno):
    """Draws a polygon around area of value lineno in array lspread."""
    lspread = np.pad(lspread, 1, "constant", constant_values=0)
    cont = find_contours(np.where(lspread == lineno, lineno, 2 * lineno), lineno)
    if len(cont) == 1 and all(cont[0][0] == cont[0][-1]):
        polyg = approximate_polygon(cont[0], tolerance=1).astype(int)
        return [(p[0] - 1, p[1] - 1) for p in polyg]
    else:
        return []
def _interpolate_lines(clusters, elongation_offset, extent, st_map, end_map):
    """
    Interpolates the baseline clusters and sets the correct line direction.
    """
    logger.debug('Reticulating splines')
    lines = []
    extent = geom.Polygon([(0, 0), (extent[1]-1, 0),
                           (extent[1]-1, extent[0]-1),
                           (0, extent[0]-1), (0, 0)])
    f_st_map = maximum_filter(st_map, size=20)
    f_end_map = maximum_filter(end_map, size=20)
    for cluster in clusters[1:]:
        # find start-end point
        points = [point for edge in cluster for point in edge]
        dists = squareform(pdist(points))
        i, j = np.unravel_index(dists.argmax(), dists.shape)
        # build adjacency matrix for shortest path algo
        adj_mat = np.full_like(dists, np.inf)
        for l, r in cluster:
            idx_l = points.index(l)
            idx_r = points.index(r)
            adj_mat[idx_l, idx_r] = dists[idx_l, idx_r]
        # shortest path
        _, pr = shortest_path(adj_mat, directed=False,
                              return_predecessors=True, indices=i)
        k = j
        line = [points[j]]
        while pr[k] != -9999:
            k = pr[k]
            line.append(points[k])
        # smooth line
        line = np.array(line[::-1])
        line = approximate_polygon(line[:, [1, 0]], 1)
        lr_dir = line[0] - line[1]
        lr_dir = (lr_dir.T / np.sqrt(np.sum(lr_dir**2, axis=-1))) * elongation_offset/2
        line[0] = line[0] + lr_dir
        rr_dir = line[-1] - line[-2]
        rr_dir = (rr_dir.T / np.sqrt(np.sum(rr_dir**2, axis=-1))) * elongation_offset/2
        line[-1] = line[-1] + rr_dir
        ins = geom.LineString(line).intersection(extent)
        if ins.type == 'MultiLineString':
            ins = linemerge(ins)
            # skip lines that don't merge cleanly
            if ins.type != 'LineString':
                continue
        line = np.array(ins, dtype='uint')
        l_end = tuple(line[0])[::-1]
        r_end = tuple(line[-1])[::-1]
        if (f_st_map[l_end] - f_end_map[l_end] > 0.2 and
                f_st_map[r_end] - f_end_map[r_end] < -0.2):
            pass
        elif (f_st_map[l_end] - f_end_map[l_end] < -0.2 and
                f_st_map[r_end] - f_end_map[r_end] > 0.2):
            line = line[::-1]
        else:
            logger.debug('Insufficient marker confidences in output. '
                         'Defaulting to upright line.')
            if line[0][0] > line[-1][0]:
                line = line[::-1]
        lines.append(line.tolist())
    return lines
def get_tumor_region(tumors, list_of_sequence):
    struct = disk(2)
    res = np.sum(tumors[..., list_of_sequence], axis=2)
    min_value = np.min(res)
    max_value = np.max(res)
    res = (((res - min_value) / (max_value - min_value)) * 255).astype(np.uint8)
    thresh = threshold_otsu(res)
    res_otsu = res > thresh
    # res_otsu = binary_opening(res_otsu, struct)
    label_image = label(res_otsu)
    regions = regionprops(label_image)
    for region in regions:
        if (region.eccentricity > 0.9 or region.extent < 0.6 or
                region.area > 3000 or region.area < 150):
            centroid = tuple(int(x) for x in region.centroid)
            label_image = flood_fill(label_image, centroid, 0)
    label_image[label_image > 0] = 255
    res[label_image == 0] = 0
    l, num = label(label_image, return_num=True)
    if num < 1:
        label_image = label(res_otsu)
        regions = regionprops(label_image)
        for region in regions:
            if (region.eccentricity > 0.9 or region.extent < 0.5 or
                    region.area > 3000 or region.area < 150):
                centroid = tuple(int(x) for x in region.centroid)
                label_image = flood_fill(label_image, centroid, 0)
        label_image[label_image > 0] = 255
        res[label_image == 0] = 0
        l, num = label(label_image, return_num=True)
    regions = regionprops(l, intensity_image=res)
    mean_intensity = [x.mean_intensity for x in regions]
    max_intensity = np.max(mean_intensity)
    for index, region in enumerate(regions):
        if region.mean_intensity < max_intensity:
            centroid = tuple(int(x) for x in region.centroid)
            label_image = flood_fill(label_image, centroid, 0)
    area = regionprops(label_image)[0].area
    contour = find_contours(label_image, 0)[0]
    coords_region = approximate_polygon(contour, tolerance=0)
    return coords_region, area, label_image
def addPolygonToObjectStructure(self, predIm):
    polygons = [list() for _ in range(self.objectStructure.shape[0])]
    for r, reg in self.objectStructure.iterrows():
        dummyImage = np.zeros(predIm.shape)
        pixl = reg['pixelList'].transpose()
        dummyImage[pixl[0], pixl[1]] = 1
        _, contours, _ = cv2.findContours(dummyImage.astype(np.uint8),
                                          cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_NONE)
        if len(np.array(contours).shape) == 1:
            contours = np.expand_dims(np.concatenate(contours, axis=0), axis=0)
        # Douglas-Peucker
        polygons[r] = measure.approximate_polygon(np.squeeze(contours),
                                                  self.epsilon)
    self.objectStructure['polygon'] = pd.Series(
        polygons, index=self.objectStructure.index).astype(object)
def get_features(filenames, full_filenames):
    X = []
    imgs = imread_collection(full_filenames)
    for k, img in enumerate(imgs):
        temp = rgb2gray(img)
        val = threshold_li(temp)
        mask = temp < val
        temp[mask] = 0
        temp[~mask] = 1
        temp = binary_erosion(temp, disk(1))
        temp = median(temp, selem=disk(2))
        temp = ndi.binary_fill_holes(temp)
        labeled_image, num_labels = label(temp, return_num=True, connectivity=1)
        temp = remove_small_objects(labeled_image, 20000)
        contours = find_contours(temp, 0.1)
        approx = approximate_polygon(contours[0], tolerance=50)
        if approx[0].tolist() == approx[-1].tolist():
            approx = approx[:-1]
        convex = ConvexHull(approx)
        number_points = len(convex.points)
        inner_points_indexes = list(
            set(range(number_points)).difference(set(convex.vertices)))
        if len(inner_points_indexes) > 4:
            inner_points_indexes = find_waste_points(convex.points,
                                                     inner_points_indexes)
        points_indexes = find_points_order(convex.points, inner_points_indexes)
        elem = convex.points[points_indexes]
        feature_vec = []
        for i in range(len(elem) - 1):
            dist = np.sqrt(sum((elem[i + 1] - elem[i]) ** 2))
            feature_vec.append(dist)
        if len(elem) < 9:
            for i in range(9 - len(elem)):
                feature_vec.append(0)
        means = find_masked_means(img, temp)
        feature_vec.append(means[0])
        feature_vec.append(means[1])
        feature_vec.append(means[2])
        X.append(feature_vec)
        im = Image.open(full_filenames[k])
        draw = ImageDraw.Draw(im)
        for i in range(len(elem) - 1):
            draw.line((elem[i + 1][1], elem[i + 1][0], elem[i][1], elem[i][0]),
                      fill=(0, 128, 0), width=3)
        del draw
        os.makedirs('out', exist_ok=True)
        out_filename = 'out/out_' + filenames[k]
        im.save(out_filename)
    return X
def get_max_contour_coor(img):
    max_contour = 0
    max_coor = []
    # find the largest contour
    for contour in measure.find_contours(img, 0):
        # tolerance 75: look for a roughly rectangular contour, ignoring
        # bumps that specular highlights may introduce
        coord = measure.approximate_polygon(contour, tolerance=75)
        if len(contour) > max_contour:
            max_contour = len(contour)
            max_coor = coord
    return len(max_coor), max_coor
def inferRegions(self, imagePath):
    image = skimage.io.imread(imagePath)
    image = color.gray2rgb(image)
    hight = image.shape[0]
    width = image.shape[1]
    # tolerance mask => polygon
    tolerance = math.sqrt((width * hight) * 0.00005)
    print(tolerance)
    # Run detection
    results = self.model.detect([image], verbose=1)
    r = results[0]
    # extract polygons
    regionList = []
    for regionNumber in range(r['rois'].shape[0]):
        mask = r['masks'][:, :, regionNumber]
        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        # get only largest contour ???
        verts = contours[0]
        # Subtract the padding and flip (y, x) to (x, y)
        verts = np.fliplr(verts) - 1
        # approximate
        appr_polygon = approximate_polygon(verts, tolerance)
        region = editor.TextRegion(appr_polygon, "regionID", 0, None, None, None)
        print(r['class_ids'][regionNumber])
        print(len(self.class_names))
        region.regionName = self.class_names[r['class_ids'][regionNumber]]
        regionList.append(region)
    return regionList
def get_contour(image):
    image_data = plt.imread(image)
    gimg = color.colorconv.rgb2grey(image_data)
    bwimg = gimg > 0
    bwimg = binary_dilation(bwimg, None)
    bwimg = ndimage.binary_fill_holes(bwimg)
    bwimg = binary_erosion(bwimg)
    contours = [approximate_polygon(new_s, 0.9)
                for new_s in measure.find_contours(bwimg, 0.5)]
    return bwimg, contours
def compute_rectangles(labels, label_indices, tolerance_frac=0.03):
    rectangles = []
    for n in label_indices:
        mask = labels == n
        # find_contours returns (row, col) coords, contour will wind clockwise
        contours = measure.find_contours(mask, 0.5)
        contour = sorted(contours, key=len)[-1]
        tolerance = compute_tolerance(contour, tolerance_frac)
        corners = measure.approximate_polygon(contour, tolerance=tolerance)
        corners = filter_ends(corners)
        rectangles.append(corners)
    return rectangles
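# compute_tolerance and filter_ends are helpers not shown in this excerpt. One
# plausible reading (an assumption, not the original code) is that the tolerance
# scales with the contour's overall size and filter_ends drops the duplicated
# closing point; a minimal sketch under those assumptions:
import numpy as np

def compute_tolerance(contour, tolerance_frac):
    # scale the Douglas-Peucker tolerance by the contour's bounding-box diagonal
    span = contour.max(axis=0) - contour.min(axis=0)
    return tolerance_frac * np.hypot(*span)

def filter_ends(corners):
    # approximate_polygon keeps the closing point; drop it if it repeats the first
    if np.array_equal(corners[0], corners[-1]):
        corners = corners[:-1]
    return corners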
def get_KITTI_dicts(img_dir):
    dataset_dicts = []
    tmp = os.path.join(img_dir, 'annotations_raw')
    for ann in os.listdir(tmp):
        imgs_anns = open(os.path.join(tmp, ann), 'r')
        imgs_anns = imgs_anns.read().splitlines()
        prev_img_id = -1
        record = {}
        objs = []
        for _, v in enumerate(imgs_anns):
            v = v.split()
            number_of_folder = int(ann[:4])
            number_of_frame = int(v[0])
            filename = os.path.join(
                img_dir,
                'train/' + str(10000 * number_of_folder + number_of_frame))
            height, width = int(v[3]), int(v[4])
            if (int(v[1]) == 10000):
                continue
            if (prev_img_id != 10000 * number_of_folder + number_of_frame):
                if (objs != []):
                    record["annotations"] = deepcopy(objs)
                    dataset_dicts.append(deepcopy(record))
                    objs.clear()
                record["file_name"] = filename + '.png'
                record["image_id"] = 10000 * number_of_folder + number_of_frame
                record["height"] = height
                record["width"] = width
            rle2 = {"size": [height, width], "counts": v[5]}
            result_mask = decode(rle2)
            encoded_mask = encode(result_mask)
            result_bbox = toBbox(encoded_mask)
            contours = measure.find_contours(result_mask, 0.5)
            polygons = measure.approximate_polygon(
                np.flip(contours[0], axis=1), tolerance=0)
            obj = {
                "bbox": list(result_bbox),
                "bbox_mode": BoxMode.XYWH_ABS,
                "segmentation": rle2,  # [polygons.tolist()],
                "category_id": int(v[2]) - 1,
                "iscrowd": 0
            }
            objs.append(deepcopy(obj))
            prev_img_id = 10000 * number_of_folder + number_of_frame
        print("Done: ", ann)
    return dataset_dicts
def get_contours(contours, tolerance=0.0005):
    '''Concatenate a list of contours and approximate it if possible'''
    global_contour = {
        'vertices': [],
        'segments': [],
        'holes': [],
        'triangles': [],
    }
    previous_region_index = 0
    vertex_index = 0
    for c in contours:
        triangle = []
        c = c.copy()
        c -= PADDING  # Remove padding
        for axis in range(2):
            # Scale back to pre-padding on each axis
            c[..., axis] *= (shape[axis] + PADDING * 2) / shape[axis]
        c /= np.array(padded_array.shape)  # Divide to get a 1x1 square.
        c[..., 1] *= shape[1] / shape[0]  # Get aspect ratio back
        # Simplify polygon
        c = measure.approximate_polygon(c, tolerance)
        global_contour['vertices'].extend(c)
        for pt_i in range(len(c) - 1):
            global_contour['segments'].append([
                pt_i + previous_region_index,
                pt_i + previous_region_index + 1
            ])
            if cut_type == 'CONTOURS':
                triangle.append(pt_i + previous_region_index)
        global_contour['segments'].append(
            [len(c) - 1 + previous_region_index, previous_region_index])
        if cut_type == 'CONTOURS':
            global_contour['triangles'].append(triangle)
        previous_region_index += len(c)
        # Add hole if polygon ccw
        if is_polygon_clockwise(c):
            c_ar = np.array(c)
            global_contour['holes'].append(
                [c_ar[..., 0].mean(), c_ar[..., 1].mean()])
            # TODO: find point inside concave polygons
    for k in ['holes', 'triangles']:
        if not global_contour[k]:
            del global_contour[k]
    return global_contour
def show_conts(cont, shape, tolerance):
    """Helper to find a good setting for <tolerance>"""
    cont_image = np.zeros(shape)
    approx_image = np.zeros(shape)
    rr, cc = polygon_perimeter(cont[:, 0], cont[:, 1])
    cont_image[rr, cc] = 1
    poly_approx = approximate_polygon(cont, tolerance=tolerance)
    rra, cca = polygon_perimeter(poly_approx[:, 0], poly_approx[:, 1])
    approx_image[rra, cca] = 1
    plt.imshow(cont_image)
    plt.show()
    plt.imshow(approx_image)
    plt.show()
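# A usage sketch for show_conts: feed it one contour from find_contours and
# compare a few tolerance values side by side. The synthetic ellipse image and
# imports are assumptions for illustration, not part of the original code:
import numpy as np
from skimage.draw import ellipse
from skimage.measure import find_contours

img = np.zeros((200, 200))
rr, cc = ellipse(100, 100, 60, 40)
img[rr, cc] = 1
cont = find_contours(img, 0.5)[0]
for tol in (1, 5, 20):
    show_conts(cont, img.shape, tol)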
def to_contours():
    for root, dirs, files in os.walk('data2'):
        for fname in files:
            dst = io.imread('data2/' + fname, as_grey=True)
            contours = measure.find_contours(dst, 0.5)
            cords = np.concatenate(contours)
            new_img = measure.subdivide_polygon(cords, degree=2,
                                                preserve_ends=True)
            appr_img = measure.approximate_polygon(new_img, tolerance=1)
            print(fname, len(appr_img.tolist()))
def _extract_patch(env_up, env_bottom, baseline, dir_vec):
    """
    Calculate a line image patch from a ROI and the original baseline.
    """
    upper_polygon = np.concatenate((baseline, env_up[::-1]))
    bottom_polygon = np.concatenate((baseline, env_bottom[::-1]))
    angle = np.arctan2(dir_vec[1], dir_vec[0])
    upper_seam = _calc_seam(baseline, upper_polygon, angle)
    bottom_seam = _calc_seam(baseline, bottom_polygon, angle)
    polygon = np.concatenate(([baseline[0]],
                              upper_seam.astype('int'),
                              [baseline[-1]],
                              bottom_seam.astype('int')[::-1]))
    return approximate_polygon(polygon, 3).tolist()
def find_base(img):
    contours = find_contours(img, 0)
    max_distance = 0
    base_line = ()
    coords = approximate_polygon(contours[0], tolerance=2.5)
    for i in range(len(coords) - 1):
        distance = math.sqrt((coords[i + 1][0] - coords[i][0])**2 +
                             (coords[i + 1][1] - coords[i][1])**2)
        if distance > max_distance:
            max_distance = distance
            base_line = [[coords[i + 1][1], coords[i + 1][0]],
                         [coords[i][1], coords[i][0]]]
    return base_line
def generate(id_file):
    # name_file_tile = "tile_awesome_" + str(id_file) + ".png"
    # name_file_tile = "" + str(id_file) + ".png"
    name_file_tile = "tile_awesome_" + str(id_file) + ".png"
    file_tile = "../../wp-admin/img/simulator_tile/" + name_file_tile
    print "file_image:", file_tile

    fimg = misc.imread(file_tile)
    gimg = color.colorconv.rgb2grey(fimg)
    contours = measure.find_contours(gimg, 0.7)  # 0.93

    # Build Json object
    data = {}
    data['polygons'] = []
    id_poly = 0
    coords = {}
    max_w = 0
    max_h = 0
    print "init()"
    for n, contour in enumerate(contours):
        coords = approximate_polygon(contour, tolerance=0.5)
        plt.plot(contour[:, 1], contour[:, 0], linewidth=0.5)
        # plt.fill_between(contour[:, 1], contour[:, 0], color='grey', alpha='0.5')
        s = ""
        points = []
        for x in coords:
            for y in x:
                points.append(y)
        l = []
        value = 0.0
        flag = True
        for i in points:
            if flag:  # x
                value = i
                max_w = max(max_w, value)
            else:  # y
                l.append([value, i])
                max_h = max(max_h, i)
            flag = not flag
        area = polygon_area(l)
        print('Irregular polygon area: {}'.format(abs(area)))
        data['polygons'].append({'_id': (id_poly + 1),
                                 'points': points,
                                 'area': abs(area)})
        id_poly = id_poly + 1
    print("number of edge [", str(id_poly), "]")
    print("w", max_w, " h", max_h)
    data['properties'] = {'width': max_w, 'height': max_h}
    file = '../../wp-admin/data/simulator_tile/' + name_file_tile + '.json'
    print "file_json:", file
    with open(file, 'w') as outfile:
        json.dump(data, outfile, indent=3)
    plt.show()
def findCorners(img):
    result = []
    img1 = img[:, :, -1]
    img2 = np.zeros((img1.shape[0] + 20, img1.shape[1] + 20))
    img2[10:img1.shape[0] + 10, 10:img1.shape[1] + 10] = img1
    contours = find_contours(img2, 0)
    aprx = approximate_polygon(contours[0], tolerance=10.0)
    aprx4 = aprox(aprx)
    for i in range(0, 4):
        result.append(aprx4[i].tolist())
    for i in range(0, len(result)):
        result[i].reverse()
    for i in range(0, len(result)):
        for j in range(0, len(result[i])):
            result[i][j] -= 10
    return result
def __call__(self, roi):
    smoothed_polygons = []
    coords = roi.coords
    for polygon in coords:
        if polygon.shape[0] > self.min_verts:
            plane = polygon[0, -1]
            smoothed_coords = approximate_polygon(polygon[:, :2], self.tolerance)
            smoothed_coords = np.hstack(
                (smoothed_coords,
                 plane * np.ones((smoothed_coords.shape[0], 1))))
        else:
            smoothed_coords = polygon
        smoothed_polygons += [smoothed_coords]
    return ROI(polygons=smoothed_polygons, im_shape=roi.im_shape)
def exec_shape_match(self, img, show=False):
    polys = []
    if img is not None:
        out_img = np.ones(img.shape, dtype=np.uint8)
        contours = self.get_contours(img, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
        for h, contour in enumerate(contours):
            if self.get_contour_area(contour) > 2000:
                new_s = np.array(contour[:, 0])
                appr_s = approximate_polygon(new_s, tolerance=5)
                polys.append(appr_s)
                if show:
                    cv2.drawContours(out_img, [appr_s], 0, GRAY, -1)
    else:
        out_img = img
    return out_img, polys
def find_targets(self, src):
    src = copy(src)
    for contour in find_contours(src, 0):
        coords = approximate_polygon(contour, tolerance=0)
        x, y = coords.T
        rr, cc = polygon_perimeter(y, x)
        src[cc, rr] = 100
    lsrc = label(src)
    r, c = src.shape
    ts = []
    for rp in regionprops(lsrc):
        cy, cx = rp.centroid
        cy += 1
        cx += 1
        tx, ty = cx - c / 2., cy - r / 2.
        src[cy, cx] = 175
        t = int(tx), int(ty)
        if t not in ts:
            ts.append((rp, t))
    return ts, src
    if (distance(l1[-1], l2[0]) < t):
        mp = middle_point(l1[-1], l2[0])
        l1[-1] = mp
        l2[0] = mp
    if (distance(l1[-1], l2[-1]) < t):
        mp = middle_point(l1[-1], l2[-1])
        l1[-1] = mp
        l2[-1] = mp

for i in xrange(30):
    merge_np(lines)

# remove singular lines
lines = filter(lambda x: not (len(x) < 2 or
                              (len(x) == 2 and distance(x[0], x[1]) < 2)),
               lines)
appr_lines = map(lambda x: approximate_polygon(np.array(x), tolerance=1.5),
                 lines)

# snap to bounds
sz = 255

def round_snap_bound(v):
    r = int(round(v))
    if (r <= 2):
        return 0
    if (r >= sz - 2):
        return sz
    return r

snap_lines = map(lambda x: map(lambda p: (round_snap_bound(p[0]),
                                          round_snap_bound(p[1])), x),
                 appr_lines)
def process_file(img_id, par, par2, vgg_big_path, vgg_small_path, linknet_small_path,
                 small_res_file_path, inc_file_path, vgg_smallest_file_path,
                 inc_smallest_file_path, res_smallest_file_path, inc3_520_file_path,
                 inc_v2_520_file_path, linknet_big_file_path, linknet_520_file_path,
                 vgg_big_path_1, vgg_smallest_file_path_1, inc_smallest_file_path_1,
                 res_smallest_file_path_1, inc3_520_file_path_1, inc_v2_520_file_path_1,
                 linknet_big_file_path_1, linknet_520_file_path_1, save_to=None):
    res_rows = []
    # Load the per-model probability masks, substituting zeros for missing ones.
    if vgg_small_path is None:
        msk = np.zeros((1300, 1300))
    else:
        msk = cv2.imread(vgg_small_path, cv2.IMREAD_UNCHANGED)
        msk = cv2.resize(msk, (1300, 1300))
    if linknet_small_path is None:
        msk2 = np.zeros((1300, 1300))
    else:
        msk2 = cv2.imread(linknet_small_path, cv2.IMREAD_UNCHANGED)
        msk2 = cv2.resize(msk2, (1300, 1300))
    if vgg_big_path is None:
        msk3 = np.zeros((1300, 1300))
        msk3_1 = np.zeros((1300, 1300))
    else:
        msk3 = cv2.imread(vgg_big_path, cv2.IMREAD_UNCHANGED)
        msk3_1 = cv2.imread(vgg_big_path_1, cv2.IMREAD_UNCHANGED)
    if small_res_file_path is None:
        res_msk = np.zeros((1300, 1300))
    else:
        res_msk = cv2.imread(small_res_file_path, cv2.IMREAD_UNCHANGED)
        res_msk = cv2.resize(res_msk, (1300, 1300))
    if inc_file_path is None:
        inc_msk = np.zeros((1300, 1300))
    else:
        inc_msk = cv2.imread(inc_file_path, cv2.IMREAD_UNCHANGED)
        inc_msk = cv2.resize(inc_msk, (1300, 1300))
    if vgg_smallest_file_path is None:
        vgg_smlst_msk = np.zeros((1300, 1300))
        vgg_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        vgg_smlst_msk = cv2.imread(vgg_smallest_file_path, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk = cv2.resize(vgg_smlst_msk, (1300, 1300))
        vgg_smlst_msk_1 = cv2.imread(vgg_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk_1 = cv2.resize(vgg_smlst_msk_1, (1300, 1300))
    if inc_smallest_file_path is None:
        inc_smlst_msk = np.zeros((1300, 1300))
        inc_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        inc_smlst_msk = cv2.imread(inc_smallest_file_path, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk = cv2.resize(inc_smlst_msk, (1300, 1300))
        inc_smlst_msk_1 = cv2.imread(inc_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk_1 = cv2.resize(inc_smlst_msk_1, (1300, 1300))
    if res_smallest_file_path is None:
        res_smlst_msk = np.zeros((1300, 1300))
        res_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        res_smlst_msk = cv2.imread(res_smallest_file_path, cv2.IMREAD_UNCHANGED)
        res_smlst_msk = cv2.resize(res_smlst_msk, (1300, 1300))
        res_smlst_msk_1 = cv2.imread(res_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        res_smlst_msk_1 = cv2.resize(res_smlst_msk_1, (1300, 1300))
    if inc3_520_file_path is None:
        inc3_520_msk = np.zeros((1300, 1300))
        inc3_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc3_520_msk = cv2.imread(inc3_520_file_path, cv2.IMREAD_UNCHANGED)
        inc3_520_msk = cv2.resize(inc3_520_msk, (1300, 1300))
        inc3_520_msk_1 = cv2.imread(inc3_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc3_520_msk_1 = cv2.resize(inc3_520_msk_1, (1300, 1300))
    if inc_v2_520_file_path is None:
        inc_v2_520_msk = np.zeros((1300, 1300))
        inc_v2_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc_v2_520_msk = cv2.imread(inc_v2_520_file_path, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk = cv2.resize(inc_v2_520_msk, (1300, 1300))
        inc_v2_520_msk_1 = cv2.imread(inc_v2_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk_1 = cv2.resize(inc_v2_520_msk_1, (1300, 1300))
    if linknet_big_file_path is None:
        link_big_msk = np.zeros((1300, 1300))
        link_big_msk_1 = np.zeros((1300, 1300))
    else:
        link_big_msk = cv2.imread(linknet_big_file_path, cv2.IMREAD_UNCHANGED)
        link_big_msk_1 = cv2.imread(linknet_big_file_path_1, cv2.IMREAD_UNCHANGED)
    if linknet_520_file_path is None:
        link_520_msk = np.zeros((1300, 1300))
        link_520_msk_1 = np.zeros((1300, 1300))
    else:
        link_520_msk = cv2.imread(linknet_520_file_path, cv2.IMREAD_UNCHANGED)
        link_520_msk = cv2.resize(link_520_msk, (1300, 1300))
        link_520_msk_1 = cv2.imread(linknet_520_file_path_1, cv2.IMREAD_UNCHANGED)
        link_520_msk_1 = cv2.resize(link_520_msk_1, (1300, 1300))

    # Average the two folds of each model.
    msk3 = (msk3 * 0.5 + msk3_1 * 0.5)
    inc_smlst_msk = (inc_smlst_msk * 0.5 + inc_smlst_msk_1 * 0.5)
    vgg_smlst_msk = (vgg_smlst_msk * 0.5 + vgg_smlst_msk_1 * 0.5)
    res_smlst_msk = (res_smlst_msk * 0.5 + res_smlst_msk_1 * 0.5)
    inc3_520_msk = (inc3_520_msk * 0.5 + inc3_520_msk_1 * 0.5)
    inc_v2_520_msk = (inc_v2_520_msk * 0.5 + inc_v2_520_msk_1 * 0.5)
    link_big_msk = (link_big_msk * 0.5 + link_big_msk_1 * 0.5)
    link_520_msk = (link_520_msk * 0.5 + link_520_msk_1 * 0.5)

    # Weighted ensemble of all model masks, weights normalized from par.
    coef = []
    tot_sum = par[:12].sum()
    for i in range(12):
        coef.append(par[i] / tot_sum)
    msk = (msk * coef[0] + msk2 * coef[1] + msk3 * coef[2] + res_msk * coef[3]
           + inc_msk * coef[4] + vgg_smlst_msk * coef[5] + inc_smlst_msk * coef[6]
           + res_smlst_msk * coef[7] + inc3_520_msk * coef[8]
           + inc_v2_520_msk * coef[9] + link_big_msk * coef[10]
           + link_520_msk * coef[11])
    msk = msk.astype('uint8')

    if save_to is not None:
        cv2.imwrite(save_to, msk, [cv2.IMWRITE_PNG_COMPRESSION, 9])

    # Binarize, clean up morphologically and skeletonize the road mask.
    msk2 = np.lib.pad(msk, ((22, 22), (22, 22)), 'symmetric')
    thr = par[12]
    msk2 = 1 * (msk2 > thr)
    msk2 = msk2.astype(np.uint8)
    if par2[0] > 0:
        msk2 = dilation(msk2, square(par2[0]))
    if par2[1] > 0:
        msk2 = erosion(msk2, square(par2[1]))
    if 'Shanghai' in img_id:
        skeleton = medial_axis(msk2)
    else:
        skeleton = skeletonize_3d(msk2)
    skeleton = skeleton[22:1322, 22:1322]

    lbl0 = label(skeleton)
    props0 = regionprops(lbl0)

    # Find junction pixels (more than two skeleton neighbours).
    cnt = 0
    crosses = []
    for x in range(1300):
        for y in range(1300):
            if skeleton[y, x] == 1:
                if skeleton[max(0, y-1):min(1300, y+2),
                            max(0, x-1):min(1300, x+2)].sum() > 3:
                    cnt += 1
                    crss = []
                    crss.append((x, y))
                    for y0 in range(max(0, y-1), min(1300, y+2)):
                        for x0 in range(max(0, x-1), min(1300, x+2)):
                            if x == x0 and y == y0:
                                continue
                            if skeleton[max(0, y0-1):min(1300, y0+2),
                                        max(0, x0-1):min(1300, x0+2)].sum() > 3:
                                crss.append((x0, y0))
                    crosses.append(crss)
    cross_hashes = []
    for crss in crosses:
        crss_hash = set([])
        for x0, y0 in crss:
            crss_hash.add(point_hash(x0, y0))
            skeleton[y0, x0] = 0
        cross_hashes.append(crss_hash)

    # Merge adjacent junction pixels into single crossing points.
    new_crosses = []
    i = 0
    while i < len(crosses):
        new_hashes = set([])
        new_hashes.update(cross_hashes[i])
        new_crss = crosses[i][:]
        fl = True
        while fl:
            fl = False
            j = i + 1
            while j < len(crosses):
                if len(new_hashes.intersection(cross_hashes[j])) > 0:
                    new_hashes.update(cross_hashes[j])
                    new_crss.extend(crosses[j])
                    cross_hashes.pop(j)
                    crosses.pop(j)
                    fl = True
                    break
                j += 1
        mean_p = np.asarray(new_crss).mean(axis=0).astype('int')
        if len(new_crss) > 1:
            t = KDTree(new_crss)
            mean_p = new_crss[t.query(mean_p[np.newaxis, :])[1][0][0]]
        new_crosses.append([(mean_p[0], mean_p[1])] + new_crss)
        i += 1
    crosses = new_crosses

    # Associate the remaining skeleton segments with the crossings.
    lbl = label(skeleton)
    props = regionprops(lbl)
    connected_roads = []
    connected_crosses = [set([]) for p in props]
    for i in range(len(crosses)):
        rds = set([])
        for j in range(len(crosses[i])):
            x, y = crosses[i][j]
            for y0 in range(max(0, y-1), min(1300, y+2)):
                for x0 in range(max(0, x-1), min(1300, x+2)):
                    if lbl[y0, x0] > 0:
                        rds.add(lbl[y0, x0])
                        connected_crosses[lbl[y0, x0]-1].add(i)
        connected_roads.append(rds)

    # Turn each skeleton segment into a simplified polyline (Douglas-Peucker),
    # snapping its endpoints to the connected crossings.
    res_roads = []
    tot_dist_min = par2[2]
    coords_min = par2[3]
    for i in range(len(props)):
        coords = props[i].coords
        crss = list(connected_crosses[i])
        tot_dist = props0[lbl0[coords[0][0], coords[0][1]]-1].area
        if (tot_dist < tot_dist_min) or (coords.shape[0] < coords_min and len(crss) < 2):
            continue
        if coords.shape[0] == 1:
            coords = np.asarray([coords[0], coords[0]])
        else:
            coords = get_ordered_coords(lbl, i+1, coords)
        for j in range(len(crss)):
            x, y = crosses[crss[j]][0]
            d1 = abs(coords[0][0] - y) + abs(coords[0][1] - x)
            d2 = abs(coords[-1][0] - y) + abs(coords[-1][1] - x)
            if d1 < d2:
                coords[0][0] = y
                coords[0][1] = x
            else:
                coords[-1][0] = y
                coords[-1][1] = x
        coords_approx = approximate_polygon(coords, 1.5)
        res_roads.append(coords_approx)

    # Deduplicate segments via hashes of their point pairs.
    hashes = set([])
    final_res_roads = []
    for r in res_roads:
        if r.shape[0] > 2:
            final_res_roads.append(r)
            for i in range(1, r.shape[0]):
                p1 = r[i-1]
                p2 = r[i]
                h1 = pair_hash(p1, p2)
                h2 = pair_hash(p2, p1)
                hashes.add(h1)
                hashes.add(h2)
    for r in res_roads:
        if r.shape[0] == 2:
            p1 = r[0]
            p2 = r[1]
            h1 = pair_hash(p1, p2)
            h2 = pair_hash(p2, p1)
            if not (h1 in hashes or h2 in hashes):
                final_res_roads.append(r)
                hashes.add(h1)
                hashes.add(h2)

    # Count how many roads touch each endpoint (1 == dangling end).
    end_points = {}
    for r in res_roads:
        h = point_hash(r[0, 0], r[0, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1
        h = point_hash(r[-1, 0], r[-1, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1

    # Rasterize the road graph for the connection search.
    road_msk = np.zeros((1300, 1300), dtype=np.int32)
    road_msk = road_msk.copy()
    thickness = 1
    for j in range(len(final_res_roads)):
        l = final_res_roads[j]
        for i in range(len(l) - 1):
            cv2.line(road_msk, (int(l[i, 1]), int(l[i, 0])),
                     (int(l[i+1, 1]), int(l[i+1, 0])), j+1, thickness)

    connect_dist = par2[4]
    min_prob = par2[5]
    angles_to_check = [0, radians(5), radians(-5), radians(10), radians(-10),
                       radians(15), radians(-15)]
    if 'Paris' in img_id or 'Vegas' in img_id:
        angles_to_check += [radians(20), radians(-20), radians(25), radians(-25)]
    add_dist = par2[6]
    add_dist2 = par2[7]
    con_r = par2[8]

    # First pass: try to connect dangling road ends straight ahead (angle 0).
    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk,
                             final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk,
                             final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2

    # Second pass: sweep a range of angles; otherwise extend the dead end.
    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob,
                                 msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob,
                                 msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2

    # Export the road polylines as WKT linestrings.
    lines = [LineString(r[:, ::-1]) for r in final_res_roads]
    if len(lines) == 0:
        res_rows.append({'ImageId': img_id, 'WKT_Pix': 'LINESTRING EMPTY'})
    else:
        for l in lines:
            res_rows.append({'ImageId': img_id,
                             'WKT_Pix': dumps(l, rounding_precision=0)})
    return res_rows
def coaddPatchNoData(rootDir, tract, patch, filter, prefix='hsc_coadd',
                     savePNG=True, verbose=True, tolerence=4,
                     minArea=10000, clobber=False, butler=None, dataId=None,
                     workDir='', starMask=False, notDeblend=True):
    """
    Generate NoData Mask for one Patch.

    Parameters:
    """
    pipeVersion = dafPersist.eupsVersions.EupsVersions().versions['hscPipe']
    if StrictVersion(pipeVersion) >= StrictVersion('3.9.0'):
        coaddData = "deepCoadd_calexp"
    else:
        coaddData = "deepCoadd"

    # Get the name of the wkb and deg file
    strTractPatch = (str(tract).strip() + '_' + patch + '_' + filter)
    # For all the accepted regions
    if (workDir is not '') and (workDir[-1] is not '/'):
        workDir += '/'
    noDataAllWkb = workDir + prefix + '_' + strTractPatch + '_nodata_all.wkb'
    fileExist1 = os.path.isfile(noDataAllWkb)
    noDataAllReg = workDir + prefix + '_' + strTractPatch + '_nodata_all.reg'
    fileExist2 = os.path.isfile(noDataAllReg)
    # For all the big mask regions
    noDataBigWkb = workDir + prefix + '_' + strTractPatch + '_nodata_big.wkb'
    noDataBigReg = workDir + prefix + '_' + strTractPatch + '_nodata_big.reg'

    # See if all the files have been generated
    fileAllExist = (fileExist1 and fileExist2)

    # Only generate new one when
    # 1) Not all files are available
    # 2) All available, but clobber = True
    if (not fileAllExist) or clobber:

        # Make a butler and specify the dataID
        if butler is None:
            butler = dafPersist.Butler(rootDir)
        if dataId is None:
            dataId = {'tract': tract, 'patch': patch, 'filter': filter}

        # Get the name of the input fits image
        if StrictVersion(pipeVersion) >= StrictVersion('3.9.0'):
            coaddImg = '%s/calexp-%s-%s-%s.fits' % (patch, filter, tract, patch)
        else:
            coaddImg = '%s.fits' % (patch)
        if rootDir[-1] is '/':
            fitsName = (rootDir + 'deepCoadd/' + filter + '/' +
                        str(tract).strip() + '/' + coaddImg)
        else:
            fitsName = (rootDir + '/deepCoadd/' + filter + '/' +
                        str(tract).strip() + '/' + coaddImg)
        if not os.path.isfile(fitsName):
            raise Exception('Can not find the input fits image: %s' % fitsName)

        # Get the name of the png file
        titlePng = prefix + strTractPatch + '_NODATA'
        noDataPng = prefix + '_' + strTractPatch + '_nodata.png'

        if verbose:
            print "## Reading Fits Image: %s" % fitsName

        # Get the exposure from the butler
        # TODO Be careful here, some of the coadd image files
        # on the disk are not useful
        try:
            calExp = butler.get(coaddData, dataId, immediate=True)
        except Exception:
            print "Oops! Can not read this image: %s !" % fitsName
        else:
            # Get the Bounding Box of the image
            bbox = calExp.getBBox(afwImage.PARENT)
            xBegin, yBegin = bbox.getBeginX(), bbox.getBeginY()
            # Get the WCS information
            imgWcs = calExp.getWcs()

            # Get the object for mask plane
            mskImg = calExp.getMaskedImage().getMask()

            # Extract the NO_DATA plane
            noData = copy.deepcopy(mskImg)
            noData.removeAndClearMaskPlane('EDGE', True)
            noData.removeAndClearMaskPlane('CLIPPED', True)
            noData.removeAndClearMaskPlane('CROSSTALK', True)
            noData.removeAndClearMaskPlane('UNMASKEDNAN', True)
            noData.removeAndClearMaskPlane('DETECTED', True)
            noData.removeAndClearMaskPlane('DETECTED_NEGATIVE', True)
            if not notDeblend:
                noData.removeAndClearMaskPlane('NOT_DEBLENDED', True)
            if not starMask:
                noData.removeAndClearMaskPlane('BRIGHT_OBJECT', True)
            # Return the mask image array
            noDataArr = noData.getArray()
            noDataArr[noDataArr > 0] = 10

            # Pad the 2-D array by a little
            noDataArr = np.lib.pad(noDataArr, ((1, 1), (1, 1)), 'constant',
                                   constant_values=0)

            # Try a very different approach: Using the find_contours and
            # approximate_polygon methods from scikit-image package
            maskShapes = []  # For all the accepted mask regions
            maskCoords = []  # For the "corner" coordinates of these regions
            maskAreas = []   # The sizes of all regions

            # Only find the 0-level contour
            contoursAll = find_contours(noDataArr, 0)
            if verbose:
                print "### %d contours have been detected" % len(contoursAll)
            for maskContour in contoursAll:
                # Approximate one extracted contour into a polygon.
                # tolerance decides the accuracy of the polygon, hence
                # the number of coords for each polygon.
                # Using large tolerance also means smaller number of final
                # polygons
                contourCoords = approximate_polygon(maskContour,
                                                    tolerance=tolerence)
                # Convert these coordinates into (RA, DEC) using the WCS
                contourSkyCoords = map(lambda x: [x[1], x[0]], contourCoords)
                contourRaDec = map(lambda x: getPixelRaDec(imgWcs, x[0], x[1],
                                                           xStart=xBegin,
                                                           yStart=yBegin),
                                   contourSkyCoords)
                # Require that any useful region must be at least triangular
                if len(contourCoords) > 3:
                    # Form a lineString using these coordinates
                    maskLine = LineString(contourRaDec)
                    # Check if the lineString is valid and simple,
                    # so can be used to form a closed and simple polygon
                    # if maskLine.is_valid and maskLine.is_simple:
                    if maskLine.is_valid:
                        contourPoly = Polygon(contourRaDec)
                        # Fix the self-intersected polygon !! VERY USEFUL
                        if not contourPoly.is_valid:
                            contourPoly = contourPoly.buffer(0)
                        maskShapes.append(contourPoly)
                        maskCoords.append(contourRaDec)
                        maskAreas.append(Polygon(contourCoords).area)
            if verbose:
                print "### %d regions are useful" % len(maskAreas)

            # Isolate the large ones
            maskBigList = np.array(maskShapes)[np.where(
                np.array(maskAreas) > minArea)]
            maskBigList = map(lambda x: x, maskBigList)

            nBig = len(maskBigList)
            if nBig > 0:
                if verbose:
                    print "### %d regions are large enough: " % nBig
                # Save all the masked regions to a .reg file
                polySaveReg(maskBigList, noDataBigReg, listPoly=True,
                            color='blue')
                # Also create a MultiPolygon object, and save a .wkb file
                maskBig = cascaded_union(maskBigList)
                cdPatch.polySaveWkb(maskBig, noDataBigWkb)
            else:
                maskBig = None
                if verbose:
                    print "### No region is larger than the minimum mask sizes"

            # Save all the masked regions to a .reg file
            polySaveReg(maskShapes, noDataAllReg, listPoly=True, color='red')
            # Also create a MultiPolygon object, and save a .wkb file
            maskAll = cascaded_union(maskShapes)
            cdPatch.polySaveWkb(maskAll, noDataAllWkb)

            if savePNG:
                if maskBig is None:
                    showNoDataMask(noDataAllWkb, title=titlePng,
                                   pngName=noDataPng)
                else:
                    showNoDataMask(noDataAllWkb, large=noDataBigWkb,
                                   title=titlePng, pngName=noDataPng)
    else:
        if verbose:
            print "### %d, %s has been reduced before! Skip!" % (tract, patch)
                 # (opening of the `hand` coordinate array truncated in the source)
                 [2.2016129, 2.734375],
                 [2.25403226, 2.60416667],
                 [2.14919355, 1.953125],
                 [2.30645161, 2.36979167],
                 [2.39112903, 2.36979167],
                 [2.41532258, 2.1875],
                 [2.1733871, 1.703125],
                 [2.07782258, 1.16666667]])

# subdivide polygon using 2nd degree B-Splines
new_hand = hand.copy()
for _ in range(5):
    new_hand = subdivide_polygon(new_hand, degree=2, preserve_ends=True)

# approximate subdivided polygon with Douglas-Peucker algorithm
appr_hand = approximate_polygon(new_hand, tolerance=0.02)

print("Number of coordinates:", len(hand), len(new_hand), len(appr_hand))

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(9, 4))

ax1.plot(hand[:, 0], hand[:, 1])
ax1.plot(new_hand[:, 0], new_hand[:, 1])
ax1.plot(appr_hand[:, 0], appr_hand[:, 1])

# create two ellipses in image
img = np.zeros((800, 800), 'int32')
rr, cc = ellipse(250, 250, 180, 230, img.shape)
img[rr, cc] = 1
rr, cc = ellipse(600, 600, 150, 90, img.shape)
def approximate_polygon(coords, tolerance):
    '''see skimage.measure.approximate_polygon'''
    return measure.approximate_polygon(coords, tolerance)
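# A quick usage sketch for the thin wrapper above; the noisy-triangle
# coordinates here are made up for illustration:
import numpy as np
from skimage import measure

coords = np.array([[0, 0], [5, 0.1], [10, 0], [5, 5], [0, 0]], dtype=float)
# with tolerance 0.5 the nearly-collinear midpoint on the bottom edge should
# be dropped, leaving just the triangle's corners
print(approximate_polygon(coords, 0.5))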
    if (distance(l1[-1], l2[0]) < t):
        mp = middle_point(l1[-1], l2[0])
        l1[-1] = mp
        l2[0] = mp
    if (distance(l1[-1], l2[-1]) < t):
        mp = middle_point(l1[-1], l2[-1])
        l1[-1] = mp
        l2[-1] = mp

for i in range(30):
    merge_np(lines)

# remove singular lines
lines = [x for x in lines
         if not (len(x) < 2 or (len(x) == 2 and distance(x[0], x[1]) < 2))]
appr_lines = [approximate_polygon(np.array(x), tolerance=1.5) for x in lines]

# snap to bounds
sz = 255

def round_snap_bound(v):
    r = int(round(v))
    if (r <= 2):
        return 0
    if (r >= sz - 2):
        return sz
    return r

snap_lines = [[(round_snap_bound(p[0]), round_snap_bound(p[1])) for p in x]
              for x in appr_lines]