def shapesPlot(shapes, inds, fig, ax):
    from skimage.measure import label, regionprops
    from skimage import feature
    from skimage.morphology import binary_dilation
    from skimage.segmentation import find_boundaries
    import pylab as plt
    import numpy as np
    #fig = plt.figure()
    #ax = fig.add_subplot(111)
    sz = np.int32(shapes.shape)
    for i in inds:
        img = shapes[i, :, :]
        mx = img[:].max()
        test = img > 0.4*mx
        test2 = binary_dilation(binary_dilation(test))
        lbls = label(test2)
        rgs = regionprops(lbls)
        if np.size(rgs) > 0:
            szs = []
            for prop in rgs:
                szs.append(prop.area)
            ind = np.argmax(szs)
            if rgs[ind].area > 100:
                pt = rgs[ind].centroid
                region = lbls == ind+1
                edges = find_boundaries(region)
                eln = edges.nonzero()
                ax.scatter(eln[1], eln[0], marker='.', color='r', linewidths=0.01)
                ax.text(pt[1]-4, pt[0]+4, '%i' % i, fontsize=14, color='k')
    return fig, ax

def findROI(footprint):
    dims = footprint.shape
    footprint = footprint.reshape(dims[0], dims[1], -1)
    ROI = np.zeros(footprint.shape)
    mxs = []
    for i in range(np.size(footprint[0, 0, :])):
        mxs.append(footprint[:, :, i].max())  # per-plane maximum
    for i in range(np.size(footprint[0, 0, :])):
        img = footprint[:, :, i].reshape(dims[0], dims[1])
        mx = mxs[i]
        thresh = img > 0.4*mx
        thresh2 = binary_dilation(binary_dilation(thresh))
        lbls, marks = label(thresh2)
        rgs = regionprops(lbls)
        if np.size(rgs) > 0:
            szs = []
            for prop in rgs:
                szs.append(prop.area)
            ind = np.argmax(szs)
            if rgs[ind].area > 100:
                region = lbls == ind+1
                out = np.zeros([dims[0], dims[1]])
                out[region] = 1
                ROI[:, :, i] = out
    ROI = ROI.reshape(dims)
    return ROI

def segment_cells(frame, mask=None):
    """
    Compute the initial segmentation based on ridge detection + watershed.
    This works reasonably well, but is not robust enough to use by itself.
    """
    blurred = filters.gaussian_filter(frame, 2)
    ridges = enhance_ridges(frame)

    # threshold ridge image
    thresh = filters.threshold_otsu(ridges)
    thresh_factor = 0.6
    prominent_ridges = ridges > thresh_factor*thresh
    prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)
    prominent_ridges = morphology.binary_closing(prominent_ridges)
    prominent_ridges = morphology.binary_dilation(prominent_ridges)

    # skeletonize
    ridge_skeleton = morphology.medial_axis(prominent_ridges)
    ridge_skeleton = morphology.binary_dilation(ridge_skeleton)
    ridge_skeleton *= mask
    ridge_skeleton -= mask

    # label
    cell_label_im = measure.label(ridge_skeleton)

    # morphological closing to fill in the cracks
    for cell_num in range(1, cell_label_im.max()+1):
        cell_mask = cell_label_im == cell_num
        cell_mask = morphology.binary_closing(cell_mask, disk(3))
        cell_label_im[cell_mask] = cell_num

    return cell_label_im

def parasites(image, cells, voronoi):
    img = Functions.global_otsu(image)
    cells = Functions.global_otsu(cells)
    s_elem = Functions.fig(Functions.fig_size)

    # Remove cells
    for i in range(Functions.iterations):
        cells = binary_dilation(cells, s_elem)
    return_image = Functions.subtraction(img, cells)

    # Remove stuff from cells
    for i in range(Functions.iterations - 1):
        return_image = binary_erosion(return_image)
    return_image = binary_opening(return_image)
    for i in range(Functions.iterations - 1):
        return_image = binary_dilation(return_image)

    # Remove bigger objects
    removal_image = return_image.copy()
    for i in range(Functions.iterations + 5):
        removal_image = binary_erosion(removal_image)
    removal_image = binary_opening(removal_image)
    for i in range(Functions.iterations + 10):
        removal_image = binary_dilation(removal_image)
    return_image = Functions.subtraction(return_image, removal_image)

    # Remove voronoi lines for better quality
    return Functions.subtraction(return_image, voronoi)

def db_eval_boundary(foreground_mask, gt_mask, bound_th=0.008):
    """
    Compute the boundary F-measure from a per-frame evaluation.
    Calculates precision/recall for boundaries between foreground_mask and
    gt_mask using morphological operators to speed it up.

    Arguments:
        foreground_mask (ndarray): binary segmentation image.
        gt_mask         (ndarray): binary annotated image.

    Returns:
        F (float): boundaries F-measure
    """
    assert np.atleast_3d(foreground_mask).shape[2] == 1

    bound_pix = bound_th if bound_th >= 1 else \
        np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))

    # Get the pixel boundaries of both masks
    fg_boundary = seg2bmap(foreground_mask)
    gt_boundary = seg2bmap(gt_mask)

    from skimage.morphology import binary_dilation, disk
    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))

    # Get the intersection
    gt_match = gt_boundary * fg_dil
    fg_match = fg_boundary * gt_dil

    # Area of the intersection
    n_fg = np.sum(fg_boundary)
    n_gt = np.sum(gt_boundary)

    # Compute precision and recall
    if n_fg == 0 and n_gt > 0:
        precision = 1
        recall = 0
    elif n_fg > 0 and n_gt == 0:
        precision = 0
        recall = 1
    elif n_fg == 0 and n_gt == 0:
        precision = 1
        recall = 1
    else:
        precision = np.sum(fg_match)/float(n_fg)
        recall = np.sum(gt_match)/float(n_gt)

    # Compute F measure
    if precision + recall == 0:
        F = 0
    else:
        F = 2*precision*recall/(precision+recall)

    return F

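# Hedged usage sketch for db_eval_boundary above: evaluates a predicted binary mask
# against a ground-truth mask. It assumes numpy is imported as np and that seg2bmap
# (the boundary-map helper referenced in the function body) is defined elsewhere in
# the same module; the masks below are synthetic illustrations only.
def _example_db_eval_boundary():
    pred = np.zeros((64, 64), dtype=bool)
    pred[10:40, 10:40] = True          # predicted square
    gt = np.zeros((64, 64), dtype=bool)
    gt[12:42, 12:42] = True            # ground-truth square, slightly shifted
    F = db_eval_boundary(pred, gt)     # boundary F-measure in [0, 1]
    print('boundary F-measure: %.3f' % F)
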
def skeleton(seg):
    skel, dist = skmorph.medial_axis(seg, return_distance=True)

    node, edge, leaf = (spim.label(g, np.ones((3, 3), bool))[0] for g in skel2graph(skel))

    trim_edge = (edge != 0) & ~(skmorph.binary_dilation(node != 0, np.ones((3, 3), bool)) != 0)
    trim_edge = spim.label(trim_edge, np.ones((3, 3), bool))[0]

    leaf_edge_vals = skmorph.binary_dilation(leaf != 0, np.ones((3, 3), bool)) != 0
    leaf_edge_vals = np.unique(trim_edge[leaf_edge_vals])
    leaf_edge_vals = leaf_edge_vals[leaf_edge_vals > 0]

    leaf_edge = leaf != 0
    trim_edge = ndshm.fromndarray(trim_edge)
    leaf_edge = ndshm.fromndarray(leaf_edge)
    Parallel()(delayed(set_msk)(leaf_edge, trim_edge, l) for l in leaf_edge_vals)
    trim_edge = np.copy(trim_edge)
    leaf_edge = np.copy(leaf_edge)

    leaf_edge[(skmorph.binary_dilation(leaf_edge, np.ones((3, 3), bool)) != 0) & (edge != 0)] = True
    leaf_edge = spim.label(leaf_edge, np.ones((3, 3), bool))[0]

    leaf_edge_node = skmorph.binary_dilation(leaf_edge != 0, np.ones((3, 3), bool)) != 0
    leaf_edge_node = ((node != 0) & leaf_edge_node) | leaf_edge
    leaf_edge_node = spim.label(leaf_edge_node, np.ones((3, 3), bool))[0]

    cand_node = leaf_edge_node * (node != 0)
    cand_node = cand_node.nonzero()
    cand_node = np.transpose((leaf_edge_node[cand_node],) + cand_node + (2 * dist[cand_node],))

    cand_leaf = leaf_edge_node * (leaf != 0)
    cand_leaf = cand_leaf.nonzero()
    cand_leaf = np.transpose((leaf_edge_node[cand_leaf],) + cand_leaf)

    if len(cand_node) > 0 and len(cand_leaf) > 0:
        cand_leaf = ndshm.fromndarray(cand_leaf)
        cand_node = ndshm.fromndarray(cand_node)
        pruned = Parallel()(delayed(prune_leaves)(cand_leaf, cand_node, j)
                            for j in np.unique(cand_node[:, 0]))
        cand_leaf = np.copy(cand_leaf)
        cand_node = np.copy(cand_node)

        pruned_ind = []
        for p in pruned:
            pruned_ind.extend(p)
        pruned_ind = tuple(np.transpose(pruned_ind))

        pruned = ~skel
        pruned = ndshm.fromndarray(pruned)
        leaf_edge = ndshm.fromndarray(leaf_edge)
        Parallel()(delayed(set_msk)(pruned, leaf_edge, l)
                   for l in np.unique(leaf_edge[pruned_ind]))
        pruned = np.copy(pruned)
        leaf_edge = np.copy(leaf_edge)

        pruned = ~pruned
    else:
        pruned = skel

    return pruned

def dilating3D(data, selem=skimor.disk(3), slicewise=False, sliceId=0):
    if slicewise:
        if sliceId == 0:
            for i in range(data.shape[0]):
                data[i, :, :] = skimor.binary_dilation(data[i, :, :], selem)
        elif sliceId == 2:
            for i in range(data.shape[2]):
                data[:, :, i] = skimor.binary_dilation(data[:, :, i], selem)
    else:
        data = scindimor.binary_dilation(data, selem)
    return data

def process_cell(img):
    # Binarize in case the input is grayscale
    if not img.dtype == 'bool':
        img = img > 0

    # Compute masks to clean long vertical lines
    h_k = 0.8
    sum0 = np.sum(img, 0)  # Flatten the matrix to one row holding the sum of each column
    thr0 = sum0 < h_k * img.shape[0]
    thr0 = thr0.reshape(len(thr0), 1)  # Convert to a one-dimensional vector

    # Compute masks to clean long horizontal lines
    w_k = 0.5
    sum1 = np.sum(img, 1)
    thr1 = sum1 < w_k * img.shape[1]
    thr1 = thr1.reshape(len(thr1), 1)

    mask = thr0.transpose() * thr1  # Build the final mask for the cell
    mask_lines = mask.copy()

    elem = morphology.square(5)
    mask = morphology.binary_erosion(mask, elem)  # Remove noise
    img1 = np.bitwise_and(mask, img)  # Filtered image

    # Segmentation of the digit block
    kerw = 5  # Kernel width
    thr_k = 0.8

    # Compute the mask marking the horizontal start and end of the digit region
    sum0 = np.sum(img1, 0)
    sum0 = signal.medfilt(sum0, kerw)
    thr0 = sum0 > thr_k * np.median(sum0)
    thr0 = np.bitwise_and(thr0.cumsum() > 0, np.flipud(np.flipud(thr0).cumsum() > 0))
    thr0 = thr0.reshape(len(thr0), 1)

    # Compute the mask marking the vertical start and end of the digit region
    sum1 = np.sum(img1, 1)
    sum1 = signal.medfilt(sum1, kerw)
    thr1 = sum1 > thr_k * np.median(sum1)
    thr1 = np.bitwise_and(thr1.cumsum() > 0, np.flipud(np.flipud(thr1).cumsum() > 0))
    thr1 = thr1.reshape(len(thr1), 1)

    # Final mask for the start and end of the characters (bounding box of the digit region)
    mask = thr0.transpose() * thr1
    mask = morphology.binary_dilation(mask, morphology.square(2))

    img = np.bitwise_and(mask_lines.astype(img.dtype), img)  # Apply the mask to remove lines
    img = morphology.binary_dilation(img, morphology.disk(1))  # Dilate to rejoin digits broken by the previous mask
    img = morphology.binary_erosion(img, morphology.disk(1))  # Return to the 'original' shape with the edges joined

    return np.bitwise_and(mask, img)

def compute_fluor_baseline(self, mask, fluor, margin):
    """mask and fluor are the global images
    NOTE: mask is 0 (black) at cells and 1 (white) outside
    """
    x0, y0, x1, y1 = self.box
    wid, hei = mask.shape
    x0 = max(x0 - margin, 0)
    y0 = max(y0 - margin, 0)
    x1 = min(x1 + margin, wid - 1)
    y1 = min(y1 + margin, hei - 1)
    mask_box = mask[x0:x1, y0:y1]

    count = 0
    inverted_mask_box = 1 - mask_box
    while count < 5:
        inverted_mask_box = morphology.binary_dilation(inverted_mask_box)
        count += 1
    mask_box = 1 - inverted_mask_box

    fluor_box = fluor[x0:x1, y0:y1]
    self.stats["Baseline"] = np.median(mask_box[mask_box > 0] * fluor_box[mask_box > 0])

def calculate_masked_stats():
    plate_no = "59798"
    parsed = get_plate_files(plate_no)
    for w in ['w2']:
        files = list(filter(lambda f: f.wave == w[1], parsed))
        # accum = np.zeros((2160, 2160), dtype=np.uint32)
        # files = filter(lambda x: 's1' not in x and 's7' not in x, all_files)
        nof = len(files)
        for i, frame in enumerate(files[0:5], 1):
            LogHelper.logText(frame.fullpath)
            img = imread(frame.fullpath)
            t = filters.threshold_yen(img)
            b1 = img > t
            b2 = binary_erosion(b1, square(2))
            b3 = binary_dilation(b2, square(10))
            b4 = binary_closing(b3, square(3))
            imm = np.ma.masked_where(b4, img)
            mn, mx = np.percentile(imm, (1, 99))
            LogHelper.logText(
                '%3d of %d, %4d-%4d-%4d-%5d, %.0f-%.0f'
                % (i, nof, imm.min(), mn, mx, imm.max(), imm.mean(), imm.std())
            )
            im2 = imm.filled(int(imm.mean()))
            out_name = "{0}\\{5}-{1}{2}-{3}-{4}.tif".format(
                ROOT_DIR, frame.row, frame.column, frame.site,
                LogHelper.init_ts, frame.experiment)
            imsave(out_name, im2)

def draw_gray_tree(frame):
    """ use a grayscale copy of the frame to draw a quadtree on the original frame """
    tree = trees.tree_edges(grayscale(frame))
    tree = morphology.binary_dilation(tree)
    return color_mask(frame, np.logical_not(tree))

def draw_tree(frame):
    """ draw the edges of a quadtree on the frame """
    tree = trees.tree_edges(frame)
    tree = morphology.binary_dilation(tree)
    return color_mask(frame, np.logical_not(tree))

def neg_tree(frame):
    """ draw a tree in negative """
    tree = trees.tree_edges(frame)
    tree = morphology.binary_dilation(tree)
    return color_mask(frame, tree)

def double_dilation(binary, selem):
    '''Returns the result of two sequential binary dilations'''
    for i in (1, 2):
        binary = binary_dilation(binary, selem)
    return binary

def estimate_rotation(img):
    assert(img.dtype == 'bool')

    # remove filled blocks to speed up line detection
    # (xor of dilation and erosion gives the morphological gradient; boolean
    #  subtraction is no longer allowed by NumPy)
    elem = morphology.square(2)
    aux = morphology.binary_dilation(img, elem) ^ morphology.binary_erosion(img, elem)

    # line detection using the probabilistic Hough transform
    thres = 50
    minlen = 0.1 * min(aux.shape)
    maxgap = 0.01 * minlen
    lines = transform.probabilistic_hough(aux, threshold=thres, line_length=minlen, line_gap=maxgap)

    # make sure the first point of each line is the one closest to the origin
    for lin in lines:
        (x0, y0), (x1, y1) = lin
        if x1*x1 + y1*y1 < x0*x0 + y0*y0:
            (x0, x1) = (x1, x0)
            (y0, y1) = (y1, y0)

    # dominant orientation
    angle_half_range = np.math.pi / 4
    nbins = int(2 * angle_half_range * (180./np.math.pi) / 0.2)

    orient = []
    for lin in lines:
        (x0, y0), (x1, y1) = lin
        orient.append(np.math.atan2(y1-y0, x1-x0))

    (h, binval) = np.histogram(orient, range=(-angle_half_range, angle_half_range), bins=nbins)
    alpha = binval[h.argmax()] * (180./np.math.pi)

    return alpha + 0.5 * (binval[1] - binval[0]) * (180./np.math.pi)

def detect_edges(image_array):
    """ Detect edges in a given image.

    Takes a numpy.array representing an image, applies filters and edge
    detection, and returns a numpy.array.

    Parameters
    ----------
    image_array : ndarray (2D)
        Image data to be processed. Detect edges on this 2D array representing the image.

    Returns
    -------
    edges : ndarray (2D)
        Edges of an image.
    """
    # Transform image into grayscale
    img = rgb2gray(image_array)
    # Remove some noise from the image
    img = denoise_tv_chambolle(img, weight=0.55)
    # Apply canny
    edges = filter.canny(img, sigma=3.2)
    # Clear the borders
    clear_border(edges, 15)
    # Dilate edges to make them more visible and connected
    edges = binary_dilation(edges, selem=diamond(3))
    return edges

def mask(self, dims=None, binary=True, outline=False):
    """
    Construct a mask from a source, either locally or within a larger image.

    Parameters
    ----------
    dims : list or tuple, optional, default = None
        Dimensions of large image in which to draw mask. If none, will restrict
        to the bounding box of the region.

    binary : boolean, optional, default = True
        Whether to incorporate values or only show a binary mask

    outline : boolean, optional, default = False
        Whether to only show outlines (derived using binary dilation)
    """
    coords = self.coordinates

    if dims is None:
        extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1
        m = zeros(extent)
        coords = (coords - self.bbox[0:len(self.center)])
    else:
        m = zeros(dims)

    if hasattr(self, 'values') and self.values is not None and binary is False:
        m[coords.T.tolist()] = self.values
    else:
        m[coords.T.tolist()] = 1

    if outline:
        from skimage.morphology import binary_dilation
        m = binary_dilation(m, ones((3, 3))) - m

    return m

def split_image_into_sudoku_pieces_adaptive_global(image, otsu_local=False, apply_gaussian=False):
    L = image.shape[0]
    d = int(np.ceil(L / 9))
    dd = d // 5
    output = []
    if apply_gaussian:
        image = gaussian_filter(image, sigma=1.0)
    if not otsu_local:
        image = to_binary_adaptive(image)
    for k in range(9):
        this_row = []
        start_row_i = max([k * d - dd, 0])
        stop_row_i = min([(k + 1) * d + dd, L])
        for kk in range(9):
            start_col_i = max([kk * d - dd, 0])
            stop_col_i = min([(kk + 1) * d + dd, L])
            i = image[start_row_i:stop_row_i, start_col_i:stop_col_i].copy()
            if otsu_local:
                i = to_binary_otsu(i)
            i = binary_opening(i)
            i = to_binary_otsu(i)
            if apply_gaussian:
                i = to_binary_otsu(binary_dilation(i))
            this_row.append(i)
        output.append(this_row)
    return output, image

def separate_segments(self):
    """
    Perform image segmentation on the "segment image", and remove any
    segments that aren't the right part of the track.
    """
    # binary image
    binary_segment_image = (
        self.end_segment_image > self.options.low_threshold_kev)
    # segmentation: labeled regions, 8-connectivity
    labels = morph.label(binary_segment_image, connectivity=2)
    x1 = self.end_coordinates[0] - self.end_segment_offsets[0]
    y1 = self.end_coordinates[1] - self.end_segment_offsets[1]
    x2 = self.start_coordinates[0] - self.end_segment_offsets[0]
    y2 = self.start_coordinates[1] - self.end_segment_offsets[1]
    chosen_label = labels[x1, y1]
    if labels[x2, y2] != chosen_label:
        # this happens with 4-connectivity. need to use 8-connectivity
        raise RuntimeError('What the heck happened?')
    binary_again = (labels == chosen_label)
    # dilate this region, in order to capture information below threshold
    # (it won't include the other regions, because there must be a gap
    #  between)
    pix_to_keep = morph.binary_dilation(binary_again)
    self.end_segment_image[np.logical_not(pix_to_keep)] = 0

def binarize_canny(pic_source, sensitivity=5.):
    ht = 5. + ((10 - sensitivity)/5.)*20.
    # print ht
    edges = canny_filter(pic_source, sigma=3, high_threshold=ht, low_threshold=2.)

    selem_morph = np.array([0, 1, 0, 1, 1, 1, 0, 1, 0], dtype=bool).reshape((3, 3))

    for i in (1, 2):
        edges = binary_dilation(edges, selem_morph)

    # misc.imsave('/home/varnivey/Data/Biophys/Burnazyan/Experiments/fluor_calc/test/edges.jpg', edges)
    # binary = ndimage.binary_fill_holes(edges)

    labels = measure_label(edges)
    labelcount = np.bincount(labels.ravel())
    bg = np.argmax(labelcount)
    edges[labels != bg] = 255

    selem_med = np.ones((3, 3), dtype=bool)
    binary = median_filter(edges, selem_med)

    for i in (1, 2, 3):
        binary = binary_erosion(edges, selem_morph)

    return edges

def single_out_annotation(base_image, small_cc_image):
    """ extracting individual annotations :
    starting from potential annotation + noise, we remove the noise and
    consolidate annotation area, then return the coordinates of center of
    potential annotations"""
    import numpy as np

    # remove small stuff
    filtered_small_cc, removed_small_cc_small = remove_small_ccomponents(
        small_cc_image, size_closing=5, hist_thres=120)
    # plot_image(removed_small_cc_small)

    # dilate
    from skimage.morphology import binary_dilation, disk
    dilation_radius = 10
    small_cc_cleaned_mask = binary_dilation(filtered_small_cc, disk(dilation_radius))
    # plot_image(small_cc_cleaned_mask)

    # label connected components
    from skimage.morphology import label
    from skimage.measure import regionprops
    markers, n_label = label(small_cc_cleaned_mask, connectivity=1, background=0, return_num=True)

    # for each cc, defines a region
    image_for_region = (base_image*255).astype(np.uint8)
    region_prop = regionprops(markers, image_for_region)

    # for each region, do something
    return region_prop

def segment_roi(roi):
    # step 1. phase congruency (edge detection)
    Mm = phasecong_Mm(roi)
    # step 2. hysteresis thresholding (of edges)
    B = hysthresh(Mm, HT_T1, HT_T2)
    # step 3. trim pixels off border
    B[B[:, 1] == 0, 0] = 0
    B[B[:, -2] == 0, -1] = 0
    B[0, B[1, :] == 0] = 0
    B[-1, B[-2, :] == 0] = 0
    # step 4. threshold to find dark areas
    dark = dark_threshold(roi, DARK_THRESHOLD_ADJUSTMENT)
    # step 5. add dark areas back to blob
    B = B | dark
    # step 6. binary closing
    B = binary_closing(B, SE3)
    # step 7. binary dilation
    B = binary_dilation(B, SE2)
    # step 8. thinning
    B = bwmorph_thin(B, 3)
    # step 9. fill holes
    B = binary_fill_holes(B)
    # step 10. remove blobs smaller than BLOB_MIN
    B = remove_small_objects(B, BLOB_MIN, connectivity=2)
    # done.
    return B

def dilate(self, size):
    """
    Dilate a source using morphological operators.

    Parameters
    ----------
    size : int
        Size of dilation in pixels
    """
    if size == 0:
        newcoords = self.coordinates
    else:
        size = (size * 2) + 1
        if hasattr(self, "values") and self.values is not None:
            raise AttributeError("Cannot dilate sources with values")
        from skimage.morphology import binary_dilation
        coords = self.coordinates
        extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1 + size * 2
        m = zeros(extent)
        coords = coords - self.bbox[0:len(self.center)] + size
        m[coords.T.tolist()] = 1
        m = binary_dilation(m, ones((size, size)))
        newcoords = asarray(where(m)).T + self.bbox[0:len(self.center)] - size
        newcoords = [c for c in newcoords if all(c >= 0)]

    newid = self.id if hasattr(self, "id") else None
    return Source(coordinates=newcoords, id=newid)

def single_out_annotation(base_image, small_cc_image):
    """ extracting individual annotations :
    starting from potential annotation + noise, we remove the noise and
    consolidate annotation area, then return the coordinates of center of
    potential annotations"""

    # remove small stuff
    filtered_small_cc, removed_small_cc_small = remove_small_ccomponents(
        small_cc_image, size_closing=5, hist_thres=120)
    # plot_image(removed_small_cc_small)

    # dilate
    from skimage.morphology import binary_dilation, disk
    dilation_radius = 10
    small_cc_cleaned_mask = binary_dilation(filtered_small_cc, disk(dilation_radius))
    # plot_image(small_cc_cleaned_mask)

    # label connected components
    from skimage.morphology import label
    from skimage.measure import regionprops
    from skimage.io import imsave
    markers, n_label = label(small_cc_cleaned_mask, connectivity=1, background=0, return_num=True)

    # for each cc, defines a region
    region_prop = regionprops(markers, (base_image*255).astype(np.uint8))

    # for each region, do something
    base_path = '/media/sf_RemiCura/PROJETS/belleepoque/extract_data_from_old_paris_map/jacoubet/results/annotations/'
    for region in region_prop:
        # print(region.bbox, region.area)
        imsave(base_path + str(region.bbox) + '.png', region.intensity_image)
    return region_prop

def detect_glare(im):
    """Detect pixels in the image where all channels have values above 0.95.
    These pixels are dilated with a disk with radius 5
    """
    saturated_pixels = np.all(im > 0.95*np.iinfo(im.dtype).max, axis=2)
    glare_mask = morph.binary_dilation(saturated_pixels, morph.disk(5)).astype('bool')
    return glare_mask

def draw_dot_tree(frame):
    """ use a grayscale copy of the frame to draw a quadtree
    and put a dot at centers of nodes """
    tree = trees.tree_dots(grayscale(frame))
    selem = morphology.diamond(4, dtype=bool)
    tree = morphology.binary_dilation(tree, selem=selem)
    return color_mask(frame, np.logical_not(tree))

def cells(image):
    img = Functions.global_otsu(image)
    s_elem = Functions.fig(Functions.fig_size)
    for i in range(Functions.iterations):
        img = binary_erosion(img, s_elem)
    for i in range(Functions.iterations):
        img = binary_dilation(img, s_elem)
    return Functions.watershed_separation(img, s_elem)

def seg_sect(self, img):
    img_canny = canny(img, sigma=self.sigma,
                      low_threshold=self.low_threshold)

    img_dilate = binary_dilation(img_canny, square(3))
    img_erode = binary_erosion(img_dilate, square(3))
    img_fill = binary_fill_holes(img_erode)

    return img_fill

def rotate_blob(blob, theta):
    """rotate a blob and smooth out rotation artifacts"""
    blob = rotate(blob, -1*theta, resize=True)
    blob = binary_closing(blob, SE3)
    blob = binary_dilation(blob, SE2)
    # note that H Sosik's version does one iteration
    # of thinning but 3 is closer to area-preserving
    blob = bwmorph_thin(blob, 3)
    return blob

def structure_factor(positions, m=4, margin=0):
    """return the 2d structure factor"""
    raise NotImplementedError("um this isn't finished")  # was Python 2: raise StandardError, "..."
    #center = 0.5*(positions.max(0) + positions.min(0))
    inds = np.round(positions - positions.min()).astype(int)
    f = np.zeros(inds.max(0)+1)
    f[inds[:, 0], inds[:, 1]] = 1
    f = binary_dilation(f, disk(ss/2))
    return fft2(f, overwrite_x=True)

def binarizacion(thresholds, out2, arreq, orig_array):
    """Binarize the image array to extract the ash object."""
    plt.figure()
    # rso = morp(np.logical_and(out2 < thr, out2 > tl))
    thresh = thresholds[0]
    thr = thresh(out2)
    rso = morp(out2 < thr)
    rso = np.logical_or(morphology.binary_opening(arreq < 5, diamond(2)), rso)
    rso = morphology.binary_dilation(rso, diamond(2))
    rso = morphology.binary_closing(rso, diamond(7))
    v1a = masked_array(arreq, rso)
    v1b = masked_array(orig_array, np.logical_not(rso))
    fig = plt.figure()
    plt.imshow(v1a, cmap='gray', interpolation='nearest')
    plt.imshow(v1b, cmap='Reds', interpolation='nearest')
    plt.title('Pluma Popocatepetl')
    plt.savefig('Pluma_popo_PRUEBA.png')
    plt.show()
    return fig

def plotPhaseBoundaryMap(self, dilate=False, **kwargs):
    """Plot phase boundary map

    :param dilate: Dilate boundary by one pixel
    """
    # Set default plot parameters then update with any input
    plotParams = {
        'vmax': 1,
        'plotColourBar': True,
        'cmap': 'grey'
    }
    plotParams.update(kwargs)

    boundariesImage = -self.phaseBoundaries
    if dilate:
        boundariesImage = mph.binary_dilation(boundariesImage)

    plot = MapPlot.create(self, boundariesImage, **plotParams)

    return plot

def find_neighbors(lung_slice, labels):
    vals = np.unique(labels, return_counts=False)   # unique label values
    neighbors = np.zeros((len(vals), len(vals)))    # init neighborhood matrix
    selem = disk(2)
    for val in vals:
        temp = np.zeros(labels.shape)       # template matrix
        temp[labels == val] = 1             # set all locations of 'val' to 1
        temp = binary_dilation(temp, selem)  # dilate the 'val' region
        extend_region = labels[temp == 1]   # map of the 'val' region after extension in labels
        neigborhood = np.unique(extend_region)  # values found in the extended region
        print(val, neigborhood)
        for i in neigborhood:
            if (i > val):  # i < val has been calculated before
                neighbors[val, i] = 1
                neighbors[i, val] = 1
    return neighbors

def filtrarImagen(image):
    filtros = []
    filtros.append(image)
    gauss = cv2.GaussianBlur(image, (3, 3), 0)
    filtros.append(gauss)
    sobel = cv2.Sobel(gauss, cv2.CV_8U, 0, 1)
    filtros.append(sobel)
    _, otsu = cv2.threshold(sobel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    filtros.append(otsu)
    dilation = binary_dilation(otsu)
    filtros.append(dilation)
    labels, num = label(dilation, return_num=True)
    sizes = np.bincount(labels.ravel())
    max = 0
    for size in sizes[1:]:
        if size > max:
            max = size
    largest = remove_small_objects(labels, max - 1)
    filtros.append(largest)
    return (largest, filtros)

def dilate_mask(mask, opt):
    if opt.mode == "harmonization":
        element = morphology.disk(radius=7)
    if opt.mode == "editing":
        element = morphology.disk(radius=20)
    mask = torch2uint8(mask)
    mask = mask[:, :, 0]
    mask = morphology.binary_dilation(mask, selem=element)
    mask = filters.gaussian(mask, sigma=5)
    nc_im = opt.nc_im
    opt.nc_im = 1
    mask = np2torch(mask, opt)
    opt.nc_im = nc_im
    mask = mask.expand(1, 3, mask.shape[2], mask.shape[3])
    plt.imsave('%s/%s_mask_dilated.png' % (opt.ref_dir, opt.ref_name[:-4]),
               convert_image_np(mask), vmin=0, vmax=1)
    mask = (mask - mask.min()) / (mask.max() - mask.min())
    return mask

def get_image_area_to_sample(img):
    """
    Calculate the set g_c, which has two properties:
    1) its pixels are background pixels
    2) they are within a certain distance to the object
    :param img: image that represents the object instance
    """

    # TODO: In the paper 'Deep Interactive Object Selection', they calculate g_c first
    # based on the original object instead of the dilated one.

    # Dilate the object by d_margin pixels to extend the object boundary
    img_area = np.copy(img)
    img_area = morphology.binary_dilation(img_area, morphology.diamond(D_MARGIN)).astype(np.uint8)

    g_c = np.logical_not(img_area).astype(int)
    g_c[np.where(distance_transform_edt(g_c) > D)] = 0

    return g_c

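# Hedged usage sketch for get_image_area_to_sample above: builds the background
# sampling region g_c for a toy object mask. It assumes the module-level constants
# D_MARGIN and D, and the morphology / distance_transform_edt imports used by the
# function, are available; the values shown here are illustrative only.
def _example_image_area_to_sample():
    instance = np.zeros((100, 100), dtype=np.uint8)
    instance[40:60, 40:60] = 1                 # toy object instance
    g_c = get_image_area_to_sample(instance)   # 1 = background pixel within distance D of the (dilated) object
    print('number of candidate background pixels:', int(g_c.sum()))
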
def create_mask(im_arr, erode=0):
    if im_arr.shape[2] == 3:
        im_arr = rgb2gray(im_arr)

    thresh = 0.05
    inv_bin = np.invert(im_arr > thresh)
    all_labels = measure.label(inv_bin)

    # Select largest object and invert
    seg_arr = all_labels == 0

    if erode > 0:
        strel = selem.disk(erode, dtype=bool)
        seg_arr = binary_erosion(seg_arr, selem=strel)
    elif erode < 0:
        strel = selem.disk(abs(erode), dtype=bool)
        seg_arr = binary_dilation(seg_arr, selem=strel)

    return seg_arr.astype(bool)

def get_cell_prob(lbl, dilation, erosion):
    ESP = 1e-5
    elevation_map = []
    for img in lbl:
        elevation_map += [sobel(img)]
    elevation_map = np.array(elevation_map)
    elevation_map = elevation_map > ESP
    cell_prob = ((lbl > 0) ^ elevation_map) & (lbl > 0)
    for i in range(len(cell_prob)):
        for j in range(erosion):
            cell_prob[i] = binary_erosion(cell_prob[i])
    for i in range(len(cell_prob)):
        for j in range(dilation):
            cell_prob[i] = binary_dilation(cell_prob[i])
    ret = np.array(cell_prob, dtype=np.uint8) * 255
    ret[0, 0] *= 0
    ret[0, -1] = 0
    ret[0, :, -1] = 0
    ret[0, :, 0] = 0
    return ret

def std_analysis(data_path, img_vol, img_start_num, use_crop, arbitrary_vol, seq_len):
    '''
    input the data dir and get the stable roi region using standard deviation
    :param data_path:
    :param img_vol:
    :param img_start_num:
    :param use_crop:
    :param arbitrary_vol:
    :param seq_len:
    :return:
    '''
    img_stack = readin_image(readin_path=data_path, maxim_num=img_vol, start_num=img_start_num)
    if use_crop:
        img_stack = crop_image(img_stack)
    seq_multi_std = 255
    start = time.time()
    stack_length = img_vol // seq_len  # integer number of sequences
    dimx, dimy, dimz = img_stack.shape
    if img_vol > dimz:
        img_vol = int(input("input a number less than {0}".format(dimz)))
    roi_sequence = np.zeros([dimx, dimy, seq_len])
    roi_std_stack = np.zeros([dimx, dimy, stack_length])
    std_stack_counter = 0
    for i in range(img_vol - seq_len * arbitrary_vol):
        for j in range(seq_len):
            roi_sequence[:, :, j] = img_stack[:, :, i + j * arbitrary_vol]
        seq_std = roi_sequence.std(axis=2)
        ret1, seq_std_norm_thresh = cv2.threshold(seq_std.astype(np.uint8), 0, 255, cv2.THRESH_OTSU)
        seq_std_norm_thresh_inv = 255 - seq_std_norm_thresh
        roi_std_stack[:, :, std_stack_counter] = seq_std_norm_thresh
        seq_multi_std *= seq_std_norm_thresh_inv / 255
        if std_stack_counter < stack_length - 1:
            std_stack_counter += 1
    end = time.time()
    elapsed_time = end - start
    print('std elapsed time: {0}'.format(elapsed_time))
    seq_multi_std = binary_dilation(seq_multi_std)
    return seq_multi_std

def dilation_filter(image, kernel_shape=None, kernel_size=None):
    """Apply a dilation to a 2-d image.

    Parameters
    ----------
    image : np.ndarray
        Image with shape (y, x).
    kernel_shape : str
        Shape of the kernel used to compute the filter
        ('diamond', 'disk', 'rectangle' or 'square').
    kernel_size : int or Tuple(int)
        The size of the kernel. For the rectangle we expect two integers
        (width, height).

    Returns
    -------
    image_filtered : np.ndarray, np.uint
        Filtered 2-d image with shape (y, x).

    """
    # TODO check dtype
    # check parameters
    check_array(image, ndim=2, dtype=[np.uint8, np.uint16, bool])
    check_parameter(kernel_shape=(str, type(None)),
                    kernel_size=(int, tuple, list, type(None)))

    # get kernel
    if kernel_shape is None or kernel_size is None:
        kernel = None
    else:
        kernel = _define_kernel(shape=kernel_shape,
                                size=kernel_size,
                                dtype=image.dtype)

    # apply filter
    if image.dtype == bool:
        image_filtered = binary_dilation(image, kernel)
    else:
        image_filtered = dilation(image, kernel)

    return image_filtered

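# Hedged usage sketch for dilation_filter above: applies a binary and a grayscale
# dilation to small synthetic images. It assumes the module helpers referenced in
# the function (check_array, check_parameter, _define_kernel) are importable
# alongside it; the inputs are illustrative only.
def _example_dilation_filter():
    binary_image = np.zeros((32, 32), dtype=bool)
    binary_image[15, 15] = True
    grown = dilation_filter(binary_image, kernel_shape="disk", kernel_size=2)

    gray_image = np.zeros((32, 32), dtype=np.uint8)
    gray_image[15, 15] = 255
    smoothed = dilation_filter(gray_image, kernel_shape="square", kernel_size=3)
    return grown, smoothed
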
def watershed(image, mask, filename, dims, dial_rad=9, max_neuron_size=21, min_neuron_size=4, coef=2):
    """
    Receives an image, a boolean mask containing the seeds for the watershed
    algorithm, the json filename to save, the dimensions of the image and an
    optional dilation radius and max/min neuron sizes for size selection, as
    well as the length/width ratio (coef).
    """
    # sure foreground
    sure_fg = np.uint8(mask)
    sure_bg = np.uint8(
        morphology.binary_dilation(mask, morphology.disk(dial_rad)))

    # initialize
    img = np.zeros([512, 512, 3], dtype='uint8')
    img[:, :, 0] = np.ndarray.astype(norm_data(image) * 255, 'uint8')

    # unknown area
    unknown = cv.subtract(sure_bg, sure_fg)

    # check connected components and label
    ret, markers = cv.connectedComponents(sure_fg)
    del ret

    # make background 1, and unknown 0
    markers += 1
    markers[unknown == 1] = 0

    # run watershed
    markers_after_WS = cv.watershed(img, markers)
    size_selected_markers = size_selection(markers_after_WS, max_neuron_size, min_neuron_size, coef)

    if size_selected_markers is not None:  # if size selection kept anything
        mask_to_json(size_selected_markers, filename, True)
        return size_selected_markers
    else:
        return None

def locate_plumes_with_fires(aod, fire_rows_plume, fire_cols_plume):
    '''
    For each fire check its nearest label.  If a label appears more than
    once it is associated with multiple fires, so get rid of it.
    '''
    mask = aod >= AOD_MIN_LIMIT  # update using climatological data? Or ML approach? Pros and Cons.
    mask = binary_erosion(mask)
    mask = binary_dilation(mask)

    # label the mask
    labelled_image = label(mask)

    # find all labels associated with a fire
    all_plume_labels = []
    for r, c in zip(fire_rows_plume, fire_cols_plume):
        nearest_label_for_fire = extract_label(labelled_image, r, c)

        if nearest_label_for_fire is not None:
            all_plume_labels.append(nearest_label_for_fire)

    # drop any labels that are duplicated
    final_plume_labels = []
    for l in all_plume_labels:
        appearences = np.sum(np.array(all_plume_labels) == l)
        if appearences < 2:
            final_plume_labels.append(l)

    # update labelled image
    for l in np.unique(labelled_image):
        if l not in final_plume_labels:
            labelled_image[labelled_image == l] = 0
        elif (labelled_image == l).sum() > 10000:
            # get rid of unreasonably large plumes
            labelled_image[labelled_image == l] = 0
        elif (labelled_image == l).sum() < 100:
            # get rid of unreasonably small plumes
            labelled_image[labelled_image == l] = 0

    return labelled_image

def get_water_mask_from_S2(ndwi, canny_sigma=4, canny_threshold=0.3, selem=disk(4)):
    """
    Make water detection on input NDWI single band image.
    """
    # default threshold (no water detected)
    otsu_thr = 1.0
    status = 0

    # transform NDWI values to [0,1]
    ndwi_std = (ndwi - np.min(ndwi))/np.ptp(ndwi)

    if len(np.unique(ndwi)) > 1:
        edges = canny(ndwi_std, sigma=canny_sigma, high_threshold=canny_threshold)
        edges = binary_dilation(edges, selem)
        ndwi_masked = ma.masked_array(ndwi, mask=np.logical_not(edges))

        if len(np.unique(ndwi_masked.data[~ndwi_masked.mask])) > 1:
            # threshold determined using dilated canny edge + otsu
            otsu_thr = threshold_otsu(ndwi_masked.data[~ndwi_masked.mask])
            status = 1

            # if majority of pixels above threshold have negative NDWI values
            # change the threshold to 0.0
            fraction = np.count_nonzero(ndwi > 0)/np.count_nonzero(ndwi > otsu_thr)
            if fraction < 0.9:
                otsu_thr = 0.0
                status = 3
        else:
            # threshold determined with otsu on entire image
            otsu_thr = threshold_otsu(ndwi)
            status = 2

            # if majority of pixels above threshold have negative NDWI values
            # change the threshold to 0.0
            fraction = np.count_nonzero(ndwi > 0)/np.count_nonzero(ndwi > otsu_thr)
            if fraction < 0.9:
                otsu_thr = 0.0
                status = 4

    return status, (ndwi > otsu_thr).astype(np.uint8)

def dilate_contour(contour, radius=5):
    """Dilate the contour.

    PARAMETERS
    ----------
    contour: numpy array
        Array containing the contour
    radius: int, default 5
        Radius of ring to be extracted.
    """
    contour = contour.astype(bool)
    radius = int(radius)
    disk = morphology.disk(radius)

    # Dilation with radius in axial direction
    for ind in range(contour.shape[0]):
        contour[ind, :, :] = morphology.binary_dilation(contour[ind, :, :], disk)

    return contour

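# Hedged usage sketch for dilate_contour above: dilates a 3-d contour slice by
# slice; only numpy (as np) and skimage.morphology (imported as morphology), as used
# by the function itself, are assumed.
def _example_dilate_contour():
    contour = np.zeros((4, 50, 50), dtype=bool)
    contour[:, 25, 10:40] = True                 # thin line in every axial slice
    ring = dilate_contour(contour, radius=3)     # line thickened by ~3 pixels in-plane
    return ring
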
def get_connected_component_shape(layer, event):
    data_coordinates = layer.world_to_data(event.position)
    cords = np.round(data_coordinates).astype(int)
    val = layer.get_value(data_coordinates)
    if val is None:
        return
    if val != 0:
        data = layer.data
        binary = data == val
        if 'Shift' in event.modifiers:
            binary_new = binary_erosion(binary)
            data[binary] = 0
        else:
            binary_new = binary_dilation(binary)
        data[binary_new] = val
        size = np.sum(binary_new)
        layer.data = data
        msg = (f'clicked at {cords} on blob {val} which is now {size} pixels')
    else:
        msg = f'clicked at {cords} on background which is ignored'
    print(msg)

def fillgaps(img, iterations=1, mode='closing', sqsize=21):
    """dilation bit is from http://stackoverflow.com/a/28079714
    fills gaps in binary skeleton image
    """
    from skimage import morphology, img_as_bool, segmentation
    from scipy import ndimage as ndi

    if mode == 'dilate':
        image = 1 - img_as_bool(img)
        out = ndi.distance_transform_edt(~image)
        out = out < 0.05 * out.max()
        out = morphology.skeletonize(out)
        out = morphology.binary_dilation(out, morphology.selem.disk(1))
        out = segmentation.clear_border(out)
        out = out | image
    elif mode == 'closing':
        out = 1 - img_as_bool(img)
        while iterations > 0:
            out = morphology.binary_closing(out, morphology.square(sqsize))
            iterations -= 1
    return out

def segmentate():
    LastMap = None
    while True:
        # i, img = segmentation_queue.get()
        i, img = segmentation_pipe_out.recv()
        print("starting Segmentation %s" % i)
        SegMap, diff = VB.segmentate(img, do_neighbours=False, return_diff=True)
        # import matplotlib.pyplot as plt
        # plt.imshow(VB.DumbStory)
        # plt.show()
        if LastMap is None:
            LastMap = np.zeros_like(SegMap, dtype=bool)
        SegMap &= ~NoMask
        VB.update(SegMap, img, do_neighbours=False)
        SegMap = binary_dilation(SegMap, selem=disk(5))
        SegMap = binary_erosion(SegMap, selem=disk(4))
        # mask = SegMap & ~ (SegMap & LastMap)
        SegMap_write_queue.put([i, SegMap])
        detection_pipe_in.send([i, SegMap, np.sum(diff, axis=0)])
        # LastMap = SegMap
        print("Segmented Image %s" % i)

def basin(label_mask, wall):
    h, w = np.shape(label_mask)
    y, x = np.mgrid[0:h, 0:w]
    struct = generate_binary_structure(2, 2)
    shifty, shiftx = np.mgrid[0:3, 0:3]
    shifty = (shifty - 1).flatten()
    shiftx = (shiftx - 1).flatten()

    for i in range(4):
        obdr = label_mask ^ binary_dilation(label_mask, struct)
        ibdr = label_mask ^ binary_erosion(label_mask, struct)
        yob, xob = y[obdr], x[obdr]
        ynb, xnb = yob.reshape(-1, 1) + shifty, xob.reshape(-1, 1) + shiftx

        wallnb = np.min(map_coords(wall, (ynb, xnb)) * (map_coords(ibdr, (ynb, xnb)) == 1) +
                        5 * (map_coords(ibdr, (ynb, xnb)) != 1), 1)
        keep = (wall[yob, xob] > wallnb) & (wallnb <= 4)
        label_mask[yob[keep], xob[keep]] = True
        if np.sum(keep) == 0:
            break
    return label_mask

def neighbours(watershedImg, threshold_height_cells):
    '''Return array of pairs of neighbouring cells from list of thresholded cells in whole image'''

    # If cells aren't thresholded
    # cells = np.sort(np.unique(watershedImg))
    # cells = cells[1:]

    neighbours = np.empty((0, 2))
    for cel in threshold_height_cells:
        BW = segmentation.find_boundaries(watershedImg == cel)
        BW_dilated = morphology.binary_dilation(BW)
        neighs = np.unique(watershedImg[BW_dilated == 1])
        indices = np.where(neighs == 0.0)
        indices = np.append(indices, np.where(neighs == cel))
        neighs = np.delete(neighs, indices)
        for n in neighs:
            neighbours = np.append(neighbours, [(cel, n)], axis=0).astype(np.uint16)
    return neighbours

def task1():
    img = mimg.imread("bw1.bmp")
    new_img1 = binary_erosion(img, selem=square(width=30))
    new_img2 = binary_erosion(img, selem=rectangle(width=30, height=20))
    new_img3 = binary_erosion(img, selem=diamond(radius=5))
    new_img4 = binary_erosion(img, selem=disk(radius=15))
    new_img5 = binary_erosion(img, selem=star(a=10))
    new_img6 = binary_erosion(img, selem=octagon(m=10, n=20))
    new_img7 = binary_dilation(img)

    fig, ax = plt.subplots(1, 8)
    show(ax[0], img, "original")
    show(ax[1], new_img1, "BE square")
    show(ax[2], new_img2, "BE rectangle")
    show(ax[3], new_img3, "BE diamond")
    show(ax[4], new_img4, "BE disk")
    show(ax[5], new_img5, "BE star")
    show(ax[6], new_img6, "BE octagon")
    show(ax[7], new_img7, "binary_dilation")
    plt.show()

def fill_gap(bn_img, struct='disk', dim=3):
    """
    fill holes in binary mask
    :param bn_img: binary mask [np.array]
    :param struct: structuring element to use ['disk', 'diamond', 'square']
    :param dim: size of structuring element [int]
    :return out: filled mask [np.array]
    """
    # set element
    if struct == 'disk':
        elm = morphology.disk(dim)
    elif struct == 'square':
        elm = morphology.square(dim)
    elif struct == 'diamond':
        elm = morphology.diamond(dim)

    # assume same structuring element for both closing stages
    out = morphology.binary_dilation(bn_img, elm)
    out = morphology.binary_erosion(out, elm)

    return out

def picasso_structure_mask(inputim, struc, dilations=2, dilationselem=np.ones((5, 5), dtype='float32')):
    ox = inputim.mdh['Origin.x']
    oy = inputim.mdh['Origin.y']
    vxsz = 1e3 * inputim.mdh['voxelsize.x']

    mask0 = np.zeros(inputim.data.shape[0:2], dtype='float32')
    labels = []
    nsites = []
    cxa = []
    cya = []
    for label in range(0, int(struc['HandleStruct'].max() + 1)):
        strucn = struc['HandleStruct'] == label
        newim = np.zeros(inputim.data.shape[0:2], dtype='float32')
        strucx = ((struc['HandleX'][strucn]) - ox) / vxsz
        strucy = ((struc['HandleY'][strucn]) - oy) / vxsz
        cx = struc['HandleX'][strucn].mean()
        cy = struc['HandleY'][strucn].mean()
        cxi = np.rint(strucx.mean()).astype('int')  # integer centroid x
        cyi = np.rint(strucy.mean()).astype('int')  # integer centroid y
        labels.append(label + 1)
        nsites.append(strucx.size)
        cxa.append(cx)
        cya.append(cy)
        ind = (strucx < newim.shape[0]) * (strucy < newim.shape[1]) * (strucx >= 0) * (strucy >= 0)
        if np.any(ind):
            newim[strucx[ind].astype('i'), strucy[ind].astype('i')] = 1.0
            newim2 = morph.convex_hull_image(newim)
            for i in range(dilations):
                newim2 = morph.binary_dilation(newim2, selem=dilationselem)
            mask0[newim2 > 0.5] = label + 1

    sitesdf = pd.DataFrame(
        list(zip(labels, nsites, cxa, cya)),
        columns=['Label', 'NSites', 'CentroidX', 'CentroidY'])

    return ImageStack(mask0, mdh=inputim.mdh), sitesdf

def main():
    name = 'photos/dice30'
    image = cv2.imread(name + '.jpg')
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    denoised = restoration.denoise_nl_means(gray, h=0.95)
    threshold = filters.threshold_minimum(denoised)
    thres = denoised > threshold
    canny = feature.canny(thres, sigma=3)
    dilated = mp.binary_dilation(canny, selem=selem)
    edges = to_uint8(dilated)

    contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Loop over the contours
    counter = 0
    for c, h in zip(contours, hierarchy[0]):
        c = c.astype("float")
        c *= ratio
        c = c.astype("int")
        cv2.drawContours(image, [c], -1, (0, 255, 0), 5)
        if h[0] == -1 and h[1] == -1 and h[2] == -1:
            counter += 1

    cv2.putText(image, "Total: " + str(counter),
                (int(len(image[0]) / 4), int(0.95 * len(image))),
                cv2.FONT_HERSHEY_SIMPLEX, 10 / (4000 / len(image)), (255, 255, 255), 5)
    print_hierarchy(hierarchy[0], contours, ratio, image)

    img = Image.fromarray(image)
    img.save(name + '_proc.jpg')
    img.show()

def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4):
    """
    Grow mask including pixels that have a high likelihood.
    GM tissue parameters are sampled in image patches of ``ww`` size.

    This is inspired by mindboggle's solution to the problem:
    https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660
    """
    selem = sim.ball(bw)

    if ants_segs is None:
        ants_segs = np.zeros_like(aseg, dtype=np.uint8)

    aseg[aseg == 42] = 3  # Collapse both hemispheres
    gm = anat.copy()
    gm[aseg != 3] = 0

    refined = refine_aseg(aseg)
    newrefmask = sim.binary_dilation(refined, selem) - refined
    indices = np.argwhere(newrefmask > 0)
    for pixel in indices:
        # When ATROPOS identified the pixel as GM, set and carry on
        if ants_segs[tuple(pixel)] == 2:
            refined[tuple(pixel)] = 1
            continue

        window = gm[
            pixel[0] - ww:pixel[0] + ww,
            pixel[1] - ww:pixel[1] + ww,
            pixel[2] - ww:pixel[2] + ww
        ]
        if np.any(window > 0):
            mu = window[window > 0].mean()
            sigma = max(window[window > 0].std(), 1.e-5)
            zstat = abs(anat[tuple(pixel)] - mu) / sigma
            refined[tuple(pixel)] = int(zstat < zval)

    refined = sim.binary_opening(refined, selem)
    return refined

def weight_binary_ratio(label, mask=None, dilate=False):
    if label.max() == label.min():
        # uniform weights for single-label volume
        return np.ones_like(label, np.float32)

    min_ratio = 5e-2
    label = (label != 0).astype(np.float64)  # foreground
    if mask is not None:
        mask = mask.astype(label.dtype)[np.newaxis, :]
        ww = (label*mask).sum() / mask.sum()
    else:
        ww = label.sum() / np.prod(label.shape)
    ww = np.clip(ww, a_min=min_ratio, a_max=1-min_ratio)
    weight_factor = max(ww, 1-ww)/min(ww, 1-ww)

    if dilate:
        N = label.ndim
        assert N in [3, 4]
        struct = np.ones([1]*(N-2) + [3, 3])
        label = (label != 0)
        label = binary_dilation(label, struct).astype(np.float64)

    # Case 1 -- Affinity Map
    # In that case, ww is large (i.e., ww > 1 - ww), which means the high weight
    # factor should be applied to background pixels.
    #
    # Case 2 -- Contour Map
    # In that case, ww is small (i.e., ww < 1 - ww), which means the high weight
    # factor should be applied to foreground pixels.
    if ww > 1-ww:
        # switch when foreground is the dominant class
        label = 1 - label

    weight = weight_factor*label + (1-label)
    if mask is not None:
        weight = weight*mask

    return weight.astype(np.float32)

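# Hedged usage sketch for weight_binary_ratio above: computes per-pixel class-balance
# weights for a volume with a sparse foreground; only numpy (as np) and, when
# dilate=True, the binary_dilation import used inside the function are assumed.
def _example_weight_binary_ratio():
    label = np.zeros((8, 64, 64), dtype=np.uint8)
    label[:, 28:36, 28:36] = 1              # small foreground block (rare class)
    weight = weight_binary_ratio(label)     # rare-class pixels receive the larger weight
    print(weight.max() / weight.min())      # ratio of rare-class to common-class weight
    return weight
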
def frame_diff_analysis(data_path, img_vol, img_start_num, use_crop, arbitrary_vol, diff_step):
    '''
    input the data dir and get the stable roi region using frame difference
    :param data_path:
    :param img_vol:
    :param img_start_num:
    :param use_crop:
    :param arbitrary_vol:
    :param diff_step:
    :return:
    '''
    img_stack = readin_image(readin_path=data_path, maxim_num=img_vol, start_num=img_start_num)
    if use_crop:
        img_stack = crop_image(img_stack)
    seq_multi_frame_diff = 255
    [dimx, dimy, _] = img_stack.shape
    start = time.time()
    diff_stack = arbitrary_frame_diff(img_stack, step=arbitrary_vol)
    diff_stack_length = diff_step
    diff_roi_sequence = np.zeros([dimx, dimy, diff_stack_length])
    for i in range(0, diff_stack.shape[2], diff_step):
        for j in range(diff_step):
            diff_roi_sequence[:, :, j] = diff_stack[:, :, i + j]
        diff_short_sum = np.sum(diff_roi_sequence, axis=2)
        diff_short_seq_diff_255 = diff_short_sum.astype(np.uint8)
        ret0, diff_short_seq_diff_thresh = cv2.threshold(
            diff_short_seq_diff_255, 0, 255, cv2.THRESH_OTSU)
        diff_short_seq_std_norm_thresh_inv = 255 - diff_short_seq_diff_thresh
        seq_multi_frame_diff *= diff_short_seq_std_norm_thresh_inv / 255
    frame_diff_result = seq_multi_frame_diff
    frame_diff_result = binary_dilation(frame_diff_result)
    end = time.time()
    elapsed_time = end - start
    print('frame difference elapsed time: {0}'.format(elapsed_time))
    return frame_diff_result

def addGrainBoundaries(self, colour=None, dilate=False):
    """Add grain boundaries to the plot.

    Parameters
    ----------
    colour : str
        Colour of grain boundaries.
    dilate : bool
        If true, dilate the grain boundaries

    Returns
    -------
    matplotlib.image.AxesImage

    """
    if colour is None:
        colour = "white"

    boundariesImage = -self.callingMap.boundaries
    if dilate:
        boundariesImage = mph.binary_dilation(boundariesImage)

    # create colourmap for boundaries going from transparent to
    # opaque of the given colour
    boundariesCmap = mpl.colors.LinearSegmentedColormap.from_list(
        'my_cmap', ['white', colour], 256)
    boundariesCmap._init()
    boundariesCmap._lut[:, -1] = np.linspace(0, 1, boundariesCmap.N + 3)

    img = self.ax.imshow(boundariesImage, cmap=boundariesCmap,
                         interpolation='None', vmin=0, vmax=1)
    self.draw()
    self.imgLayers.append(img)

    return img

def dilateBead(diameter, shape, factor=10):
    """Dilates a given circle based on the diameter, image shape and dilation factor

    Args:
        diameter (int): The diameter of the circle.
        shape (int): The shape of the image the circle is derived from.
        factor (optional) (int): Dilation factor, determines the size of the selection element.

    Returns:
        diameter (int): The diameter of the dilated circle.
    """
    temp = np.zeros(shape)
    circle = draw.circle(shape[0] / 2, shape[1] / 2, diameter / 2)
    temp[circle[0], circle[1]] = 1
    temp = morphology.binary_dilation(temp, np.ones((factor, factor)))
    return np.unique(temp, return_counts=True)[1][1:]

def cell_markers_from_mask(mask_cell, param, scale=1, vis_diag=False, fig=''):
    # use dtf to find markers for watershed
    skel, dtf = morphology.medial_axis(mask_cell, return_distance=True)
    dtf.flat[(mask_cell > 0).flatten()] += np.random.random(((mask_cell > 0).sum()))

    # watershed seeds
    # TODO - add parameters to cfg
    local_maxi = feature.peak_local_max(
        dtf,
        indices=False,
        threshold_abs=0.25 * param.rbcR * scale,
        footprint=np.ones((int(1.5 * param.rbcR * scale), int(1.5 * param.rbcR * scale))),
        labels=mask_cell.copy())

    # markers, n_RBC = measure.label(local_maxi, return_num=True)
    markers = morphology.binary_dilation(local_maxi > 0, morphology.disk(3))

    label = measure.label(markers, connectivity=markers.ndim)
    prop = measure.regionprops(label)

    return markers, prop

def find_background(image, morph_filters=True):
    """
    Returns an index map of the pixels in input image believed to be background.
    This function first converts to HSV and then uses k-means to partition the
    saturation-values. The average of the two cluster-middle-points is used as the
    threshold to distinguish between foreground and background.

    Optionally, morphological filters may be applied to fill in gaps in the detected
    foreground (e.g., holes within detected cells) and remove background specks
    (debris that has the same color as the cells we want to detect).

    :param image: RGB image (either uint8 0-255, or float 0.0-1.0), with light background.
    :param morph_filters: boolean flag. Whether or not to apply morphological filters
        to improve background detection.
    :return: index map of pixels believed to be the background.
    """
    if image.dtype == np.uint8:
        image = image.astype(np.float64) / 255.

    # convert to HSV
    # https://en.wikipedia.org/wiki/HSL_and_HSV#/media/File:HSV_color_solid_cylinder_saturation_gray.png
    hsv_img = color.rgb2hsv(image)

    # find background through a threshold saturation level
    # apply k-means to cluster the saturation values into two groups (foreground and background)
    S = hsv_img[:, :, 1].flatten()
    V = hsv_img[:, :, 2].flatten()
    data = np.stack((S, V), axis=1)
    mask = KMeans(n_clusters=2).fit_predict(data)
    mask = mask.reshape(image.shape[0:2])
    if np.average(hsv_img[mask == 1, 2]) > np.average(hsv_img[mask == 0, 2]):
        # ensure foreground is index 1
        mask = 1 - mask

    if morph_filters:
        mask = morph.binary_closing(mask, selem=morph.disk(30))
        mask = morph.binary_dilation(mask, selem=morph.disk(5))
        mask = morph.binary_opening(mask, selem=morph.disk(30))

    background_mask = mask == 0
    return background_mask

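# Hedged usage sketch for find_background above: detects background pixels in a
# synthetic image with a light background and two coloured blobs. It assumes the
# imports used by the function (skimage.color as color, skimage.morphology as morph,
# sklearn.cluster.KMeans, numpy as np) are available; the blob sizes are chosen only
# so they survive the disk(30) opening step.
def _example_find_background():
    image = np.ones((300, 300, 3), dtype=np.float64) * 0.9   # light background
    image[40:120, 40:120] = [0.6, 0.2, 0.2]                  # reddish "cell"
    image[160:240, 160:240] = [0.2, 0.5, 0.7]                # bluish "cell"
    background = find_background(image, morph_filters=True)
    print('background fraction: %.2f' % background.mean())
    return background
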