def grey_processing(inputImg):
    # fp = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    fp = np.ones((3, 3))
    data = nd.median_filter(inputImg, size=7)
    data = nd.grey_closing(data, footprint=fp)
    data = nd.grey_opening(data, footprint=fp)
    return data
def segment_lungs(pixel_array):
    img_gray_open = ndi.grey_opening(pixel_array, size=10, mode='wrap')
    if DEBUG:
        plot(img_gray_open, 'Gray Opening')
    elevation_map = sobel(img_gray_open)
    if DEBUG:
        plot(elevation_map, 'Elevation Map')
    markers = extract_markers(img_gray_open)
    if DEBUG:
        plot(markers, 'Markers')
    watersheded = morphology.watershed(elevation_map, markers)
    if DEBUG:
        plot(watersheded, 'Watershed')
    external_contour = ndi.binary_fill_holes(watersheded - 1)
    if DEBUG:
        plot(external_contour, 'External Contour')
    watersheded_no_contour = watersheded - external_contour
    if DEBUG:
        plot(watersheded_no_contour, 'Watershed (No Contour)')
    holes_filled = ndi.binary_fill_holes(watersheded_no_contour - 1)
    if DEBUG:
        plot(holes_filled, 'Watershed (No Contour + Holes Filled)')
    removed_noise = morphology.remove_small_objects(holes_filled, 300)
    if DEBUG:
        plot(removed_noise, 'Removed Noise')
    return removed_noise
def mpls_baseline(intensities, smoothness_param=100, deriv_order=1,
                  window_length=100):
    '''Perform morphological weighted penalized least squares baseline removal.
    * paper: DOI: 10.1039/C3AN00743J (Paper) Analyst, 2013, 138, 4483-4492
    * Matlab code: https://code.google.com/p/mpls/

    smoothness_param: Relative importance of smoothness of the predicted response.
    deriv_order: Polynomial order of the difference of penalties.
    window_length: size of the structuring element for the open operation.
    '''
    Xbg = grey_opening(intensities, window_length)
    # find runs of equal values in Xbg
    flat = (np.diff(Xbg) != 0).astype(np.int8)
    run_idx, = np.where(np.diff(flat))
    # local minimums between flat runs
    bounds = run_idx[1:-1] if len(run_idx) % 2 == 0 else run_idx[1:]
    bounds = bounds.reshape((-1, 2)) + (1, 2)
    min_idxs = np.array([np.argmin(Xbg[s:t]) for s, t in bounds], dtype=int)
    min_idxs += bounds[:, 0]
    # create the weight vector by setting 1 at each local min
    w = np.zeros_like(intensities)
    w[min_idxs] = 1
    # make sure we stick to the ends
    w[0] = 5
    w[-1] = 5
    # run one iteration of smoothing
    smoother = WhittakerSmoother(Xbg, smoothness_param, deriv_order=deriv_order)
    return smoother.smooth(w)
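# Hedged illustration (not part of the original sources): how grey_opening acts
# as the morphological baseline estimate that mpls_baseline starts from. On a
# 1-D signal, an opening is a running minimum followed by a running maximum over
# the same window, so peaks narrower than the window are flattened while the
# slowly varying background survives. The synthetic spectrum and window size
# below are illustrative assumptions, not values from the paper above.
import numpy as np
from scipy.ndimage import grey_opening

x = np.linspace(0, 10, 500)
background = 0.5 + 0.05 * x                      # slow drift
peaks = np.exp(-(x - 3) ** 2 / 0.005) + 0.7 * np.exp(-(x - 7) ** 2 / 0.01)
signal = background + peaks

baseline_est = grey_opening(signal, size=101)    # window much wider than any peak
corrected = signal - baseline_est                # peaks kept, drift approximately removed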
def open_image(image_filtered):
    """open_image example ndimage.grey_opening"""
    c = ndimage.grey_opening(np.abs(image_filtered), size=(5, 5, 5))
    new_image = nib.Nifti1Image(normalise(c), affine)
    new_image.set_data_dtype(np.float32)
    nib.save(new_image, 'image_open.nii.gz')
def test_opening(self):
    self.image.opening(3)
    original_image = retina_grayscale.Retina_grayscale(None, _image_path, 1)
    assert_array_equal(
        self.image.np_image,
        ndimage.grey_opening(original_image.np_image, size=(3, 3)))
def MMF(Y, rn, bg):
    stru_len_op = int(bg)
    stru_len_clo = int(bg * 1.5)
    stru_ele_op = np.linspace(0, 0, stru_len_op)
    stru_ele_clo = np.linspace(0, 0, stru_len_clo)
    # triangular wave
    tri_wave = []
    amp = 1.0
    width = 1
    samp = rn
    asym = 0.5
    points = 1
    while points <= samp:
        Xi = 0.1 * points
        if 0 <= Xi <= width * asym:
            tri_wave.append(amp * Xi / (width * asym))
        elif width * asym < Xi < width:
            tri_wave.append(amp * (width - Xi) / (width * (1 - asym)))
        else:
            tri_wave.append(0)
        points += 1
    # low-pass
    op_flat = nd.grey_opening(Y, size=stru_len_op, structure=stru_ele_op)
    clo_flat = nd.grey_closing(op_flat, size=stru_len_clo, structure=stru_ele_clo)
    reducing = []
    for reduce in range(len(Y)):
        reducing.append(Y[reduce] - clo_flat[reduce])
    op_tri = nd.grey_opening(reducing, size=rn, structure=tri_wave)
    clo_tri = nd.grey_closing(reducing, size=rn, structure=tri_wave)
    after_stru_ele = np.linspace(0, 0, rn)
    op_than_clo = nd.grey_closing(op_tri, size=rn, structure=after_stru_ele)
    clo_than_op = nd.grey_opening(clo_tri, size=rn, structure=after_stru_ele)
    plusing = []
    for plus in range(len(op_than_clo)):
        plusing.append((op_than_clo[plus] + clo_than_op[plus]) / 2.0)
    return plusing, clo_flat
def run_FreeCAD_ImageT(self):
    from scipy import ndimage
    fn = self.getData('image')
    import matplotlib.image as mpimg
    img = mpimg.imread(fn)
    (sa, sb, sc) = img.shape
    red = 0.005 * (self.getData("red") + 100)
    green = 0.005 * (self.getData("green") + 100)
    blue = 0.005 * (self.getData("blue") + 100)
    #blue = 0
    say("rgb", red, green, blue)
    # other filters
    #img = ndimage.sobel(img)
    #img = ndimage.laplace(img)
    im2 = img[:, :, 0] * red + img[:, :, 1] * green + img[:, :, 2] * blue
    im2 = np.round(im2)
    if self.getData('invert'):
        im2 = 1 - im2
    #im2 = ndimage.sobel(im2)
    ss = int((self.getData('maskSize') + 100) / 20)
    say("ss", ss)
    if ss != 0:
        mode = self.getData('mode')
        say("mode", mode)
        if mode == 'closing':
            im2 = ndimage.grey_closing(im2, size=(ss, ss))
        elif mode == 'opening':
            im2 = ndimage.grey_opening(im2, size=(ss, ss))
        elif mode == 'erosion':
            im2 = ndimage.grey_erosion(im2, size=(ss, ss))
        elif mode == 'dilitation':
            im2 = ndimage.grey_dilation(im2, footprint=np.ones((ss, ss)))
        else:
            say("NO MODE")
    nonzes = np.where(im2 == 0)
    pts = [
        FreeCAD.Vector(sb + -x, sa - y)
        for y, x in np.array(nonzes).swapaxes(0, 1)
    ]
    h = 10
    pts = [
        FreeCAD.Vector(
            sb + -x, sa - y,
            (red * img[y, x, 0] + green * img[y, x, 1] + blue * img[y, x, 2]) * h)
        for y, x in np.array(nonzes).swapaxes(0, 1)
    ]
    colors = [img[y, x] for y, x in np.array(nonzes).swapaxes(0, 1)]
    say("len pts", len(pts))
    self.setData("Points_out", pts)
def test_grey_opening_operation_sparse_input_struct_zeros(self):
    struct = np.zeros((3, 3, 3))
    print("\n test_grey_opening_operation_sparse_input_struct_zeros...")
    v_output = vc.grey_opening(input_svar, structure=struct,
                               make_float32=False)
    d_output = ndimage.grey_opening(input_svar, structure=struct)
    msgs = "test_grey_opening_operation_sparse_input_struct_zeros"
    self.assertTrue((d_output == v_output).all(), msg=msgs)
def apply(array, **kwargs):
    """
    Apply a set of standard filters to array data.

    Call: apply(array-data, <list of key=value arguments>)

    The key=value pairs define the filtering to be done and should be given
    in the order they are to be processed. Possible key=value pairs are:

    * smooth:     gaussian filtering, value is the sigma parameter (scalar or tuple)
    * uniform:    uniform filtering (2)
    * max:        maximum filtering (1)
    * min:        minimum filtering (1)
    * median:     median filtering (1)
    * dilate:     grey dilation (1)
    * erode:      grey erosion (1)
    * close:      grey closing (1)
    * open:       grey opening (1)
    * linear_map: call linear_map(), value is the tuple (min, max) (3)
    * normalize:  call normalize(), value is the method (3)
    * adaptive:   call adaptive(), value is the sigma (3)
    * adaptive_:  call adaptive(), with uniform kernel (3)

    The filtering is done using standard scipy.ndimage functions.

    (1) The value given (to the key) is the width of the filter:
        the distance from the center pixel (the size of the filter is thus 2*value+1).
        The neighborhood is an (approximated) boolean circle (up to discretization).
    (2) Same as (1), but the neighborhood is a complete square.
    (3) See the doc of the respective function.
    """
    for key in kwargs:
        value = kwargs[key]
        if key in ('max', 'min', 'median', 'dilate', 'erode', 'open', 'close'):
            # circular footprint; only these filters take (and can build) one
            fp = _kernel.distance(array.ndim * (2 * value + 1,)) <= value
        if key == 'smooth':
            array = _nd.gaussian_filter(array, sigma=value)
        elif key == 'uniform':
            array = _nd.uniform_filter(array, size=2 * value + 1)
        elif key == 'max':
            array = _nd.maximum_filter(array, footprint=fp)
        elif key == 'min':
            array = _nd.minimum_filter(array, footprint=fp)
        elif key == 'median':
            array = _nd.median_filter(array, footprint=fp)
        elif key == 'dilate':
            array = _nd.grey_dilation(array, footprint=fp)
        elif key == 'erode':
            array = _nd.grey_erosion(array, footprint=fp)
        elif key == 'open':
            array = _nd.grey_opening(array, footprint=fp)
        elif key == 'close':
            array = _nd.grey_closing(array, footprint=fp)
        elif key == 'linear_map':
            array = linear_map(array, min=value[0], max=value[1])
        elif key == 'normalize':
            array = normalize(array, method=value)
        elif key == 'adaptive':
            array = adaptive(array, sigma=value, kernel='gaussian')
        elif key == 'adaptive_':
            array = adaptive(array, sigma=value, kernel='uniform')
        else:
            print('\033[031mUnrecognized filter:', key)
    return array
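# Hedged sketch (assumption: _kernel.distance above returns the Euclidean
# distance from the center of an array of the given shape, which is how the
# circular footprint is built). An equivalent footprint can be constructed
# with plain numpy; value=2 in 2D gives a 5x5 approximate disk.
import numpy as np

def circular_footprint(ndim, value):
    # open grid of coordinates centered on the middle pixel
    grids = np.ogrid[tuple(slice(-value, value + 1) for _ in range(ndim))]
    dist2 = sum(g ** 2 for g in grids)
    return dist2 <= value ** 2

fp = circular_footprint(2, 2)   # boolean 5x5 disk, usable as footprint=fp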
def opening(self, size_structure):
    """
    Erodes and then dilates the stored image (a greyscale opening) with a
    flat square window of the given size.

    :param size_structure: size of kernel to apply in the filter
    """
    self._copy()
    self.np_image = ndimage.grey_opening(
        self.np_image, size=(size_structure, size_structure))
def __call__(self, img: np.ndarray, mode: Optional[str] = None,
             radius: Optional[int] = None,
             binary: Optional[bool] = None) -> np.ndarray:
    """
    Apply the transform to `img`.
    """
    self.mode = self.mode if mode is None else mode
    self.radius = self.radius if radius is None else radius
    self.binary = self.binary if binary is None else binary

    input_ndim = img.squeeze().ndim  # spatial ndim
    if input_ndim == 2:
        structure = ndi.generate_binary_structure(2, 1)
    elif input_ndim == 3:
        structure = ndi.generate_binary_structure(3, 1)
    else:
        raise ValueError('Currently only support 2D & 3D data')

    channel_dim = None
    if input_ndim != img.ndim:
        channel_dim = img.shape.index(1)
        img = img.squeeze()

    if self.mode == 'closing':
        if self.binary:
            img = ndi.binary_closing(img, structure=structure,
                                     iterations=self.radius)
        else:
            for _ in range(self.radius):
                img = ndi.grey_closing(img, footprint=structure)
    elif self.mode == 'dilation':
        if self.binary:
            img = ndi.binary_dilation(img, structure=structure,
                                      iterations=self.radius)
        else:
            for _ in range(self.radius):
                img = ndi.grey_dilation(img, footprint=structure)
    elif self.mode == 'erosion':
        if self.binary:
            img = ndi.binary_erosion(img, structure=structure,
                                     iterations=self.radius)
        else:
            for _ in range(self.radius):
                img = ndi.grey_erosion(img, footprint=structure)
    elif self.mode == 'opening':
        if self.binary:
            img = ndi.binary_opening(img, structure=structure,
                                     iterations=self.radius)
        else:
            for _ in range(self.radius):
                img = ndi.grey_opening(img, footprint=structure)
    else:
        raise ValueError(f'Unexpected keyword {self.mode}')

    if channel_dim is not None:
        return np.expand_dims(img, axis=channel_dim)
    else:
        return img
def remove_background(profiles, radius=20, light_background=True):
    """
    Uses a port of the ImageJ rolling-ball background subtraction to estimate
    the background and removes it from the image.

    Parameters
    ----------
    profiles : dict
        Key is the well number and the value is an ndarray (2D) of the well.
    radius : float, optional
        The radius of the rolling ball (default: 20).
    light_background : bool
        Whether the background is light or not (default: True).

    Returns
    -------
    newprofiles : dict
        Key is the well number and the value is an ndarray (2D) of the
        background-subtracted well.
    """
    # Make "spherical" structuring element
    sz_ = 2 * radius + (radius + 1) % 2
    xco, yco = np.meshgrid(range(sz_), range(sz_))
    ballheight = float(radius**2) - (xco - radius)**2 - (yco - radius)**2
    ballheight[ballheight < 0] = 0
    ballheight = np.ma.masked_where(ballheight < 0, ballheight)
    ballheight = np.sqrt(ballheight)

    newprofiles = {}
    if light_background:
        for k, im1 in profiles.items():
            imax = im1.max()
            im2 = imax - im1
            bg1 = ndi.grey_opening(im2, structure=ballheight, mode="reflect")
            im2 -= bg1
            newprofiles[k] = (im2 - imax)
    else:
        for k, im1 in profiles.items():
            imin = im1.min()
            im2 = im1 - imin
            bg1 = ndi.grey_opening(im2, structure=ballheight, mode="reflect")
            im2 -= bg1
            newprofiles[k] = im2 - im2.min()
    return newprofiles
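# Minimal usage sketch for remove_background above (synthetic data; the well
# key, image size, and seed are illustrative assumptions, not values from the
# original pipeline; the function's own module is assumed to import np/ndi).
import numpy as np

rng = np.random.default_rng(0)
well = 100.0 + rng.normal(0, 1, size=(64, 64))   # flat well plus noise
profiles = {"A1": well}

flattened = remove_background(profiles, radius=20, light_background=False)
print(flattened["A1"].shape)   # (64, 64), background-subtracted well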
def test_grey_opening_operation_sparse_input_default_value(self):
    print("\n test_grey_opening_operation_sparse_input_default_value...")
    v_output = vc.grey_opening(input_svar, structure=structure,
                               make_float32=False)
    d_output = ndimage.grey_opening(input_svar, structure=structure)
    msgs = "test_grey_opening_operation_sparse_input_default_value"
    self.assertTrue((d_output == v_output).all(), msg=msgs)
def __test_grey_opening_operation(self, input_var):
    print("\n grey_opening Voxel testing...")
    start_time = t.time()
    v_output = vc.grey_opening(input_var, structure=structure,
                               no_of_blocks=PL[0], fakeghost=PL[1],
                               make_float32=False)
    print("grey_opening Voxel testing time taken: ",
          (t.time() - start_time), " sec")
    #print("\n grey_opening Default testing...")
    start_time = t.time()
    d_output = ndimage.grey_opening(input_var, structure=structure)
    print("grey_opening Default testing time taken: ",
          (t.time() - start_time), " sec")
    msgs = "grey_opening_operation_FAIL_with parameters: ", PL
    self.assertTrue((d_output == v_output).all(), msg=msgs)
def test_grey_opening_operation_dense_input_fakeghost_four(self):
    print("\n test_grey_opening_operation_dense_input_fakeghost_four...")
    v_output = vc.grey_opening(input_dvar, structure=structure,
                               fakeghost=4, make_float32=False)
    d_output = ndimage.grey_opening(input_dvar, structure=structure)
    msgs = "test_grey_opening_operation_dense_input_fakeghost_four"
    self.assertTrue((d_output == v_output).all(), msg=msgs)
def test_grey_opening_operation_sparse_input_blocks_ten(self):
    print("\n test_grey_opening_operation_sparse_input_blocks_ten...")
    v_output = vc.grey_opening(input_svar, structure=structure,
                               no_of_blocks=10, make_float32=False)
    d_output = ndimage.grey_opening(input_svar, structure=structure)
    msgs = "test_grey_opening_operation_sparse_input_blocks_ten"
    self.assertTrue((d_output == v_output).all(), msg=msgs)
def getROI(image):
    image_resized = resize(image)
    b, g, r = cv2.split(image_resized)
    g = cv2.GaussianBlur(g, (15, 15), 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    g = ndimage.grey_opening(g, structure=kernel)
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(g)
    x0 = int(maxLoc[0]) - 110
    y0 = int(maxLoc[1]) - 110
    x1 = int(maxLoc[0]) + 110
    y1 = int(maxLoc[1]) + 110
    return image_resized[y0:y1, x0:x1]
def test_2d_ndimage_equivalence():
    image = np.zeros((9, 9), np.uint8)
    image[2:-2, 2:-2] = 128
    image[3:-3, 3:-3] = 196
    image[4, 4] = 255

    opened = gray.opening(image)
    closed = gray.closing(image)

    footprint = ndi.generate_binary_structure(2, 1)
    ndimage_opened = ndi.grey_opening(image, footprint=footprint)
    ndimage_closed = ndi.grey_closing(image, footprint=footprint)

    assert_array_equal(opened, ndimage_opened)
    assert_array_equal(closed, ndimage_closed)
def opening(self):
    """Perform a grey opening: an erosion followed by a dilation."""
    def disk(N):
        """Get circle morphology."""
        y, x = np.ogrid[-N:N + .1, -N:N + .1]
        return np.asarray(x**2 + y**2 < N**2, dtype=np.uint8)

    # obtain and subtract background lighting level
    kernel = disk(self.lsize)
    if not cv2:
        background = ndimage.grey_opening(self.im, structure=kernel)
    else:
        background = cv2.dilate(cv2.erode(self.im, kernel), kernel)
    I2 = self.im - background
    return I2
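# Hedged self-check (not from the original source): for a flat footprint, a
# grey opening is by definition an erosion followed by a dilation with the
# same (symmetric) footprint, which is what the two code paths above rely on.
# The random image and 3x3 kernel below are illustrative.
import numpy as np
from scipy import ndimage

img = np.random.default_rng(1).integers(0, 255, (32, 32)).astype(np.uint8)
k = np.ones((3, 3), dtype=np.uint8)

opened = ndimage.grey_opening(img, footprint=k)
by_hand = ndimage.grey_dilation(ndimage.grey_erosion(img, footprint=k),
                                footprint=k)
assert np.array_equal(opened, by_hand)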
def dtm_krauss_2015(dsm, ps, minf_r):
    nx, ny = dsm.shape
    scale = 20.0 / ps
    nnx = int(nx / scale + 0.5)
    nny = int(ny / scale + 0.5)
    scale_int = int(scale + 0.5)
    print(scale)
    print(scale_int)
    minf = filters.minimum_filter(dsm, minf_r)
    dwn = cv2.resize(minf, (nny, nnx), interpolation=cv2.INTER_NEAREST)
    # dwn = scipy.misc.imresize(minf, (nnx, nny), interp='cubic')
    dwn_o = ndimage.grey_opening(dwn, 5)
    dwn_g = filters.gaussian_filter(dwn_o, 2.5)
    # return scipy.misc.imresize(dwn_g, (nx, ny), interp='cubic')
    return cv2.resize(dwn_g, (ny, nx), interpolation=cv2.INTER_CUBIC)
def test_2d_ndimage_equivalence():
    image = np.zeros((9, 9), np.uint8)
    image[2:-2, 2:-2] = 128
    image[3:-3, 3:-3] = 196
    image[4, 4] = 255

    opened = grey.opening(image)
    closed = grey.closing(image)

    selem = ndi.generate_binary_structure(2, 1)
    ndimage_opened = ndi.grey_opening(image, footprint=selem)
    ndimage_closed = ndi.grey_closing(image, footprint=selem)

    testing.assert_array_equal(opened, ndimage_opened)
    testing.assert_array_equal(closed, ndimage_closed)
def __operationTask(self, input_var):
    '''
    Perform the respective morphological operation on an input block.

    Parameters
    ----------
    input_var : 3d numpy array
        The ith block.

    Returns
    -------
    output : 3d array
        Output of the operation on the ith block.
    '''
    D = self.__operationArgumentDic
    if self.__operation == "binary_closing":
        return ndimage.binary_closing(
            input_var, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif self.__operation == "binary_dilation":
        return ndimage.binary_dilation(
            input_var, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif self.__operation == "binary_erosion":
        return ndimage.binary_erosion(
            input_var, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif self.__operation == "binary_fill_holes":
        # the output might be different from scipy.ndimage
        return ndimage.binary_fill_holes(
            input_var, structure=D["structure"], output=D["output"],
            origin=D["origin"])
    elif self.__operation == "binary_hit_or_miss":
        return ndimage.binary_hit_or_miss(
            input_var, structure1=D["structure1"], structure2=D["structure2"],
            output=D["output"], origin1=D["origin1"], origin2=D["origin2"])
    elif self.__operation == "binary_opening":
        return ndimage.binary_opening(
            input_var, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif self.__operation == "binary_propagation":
        return ndimage.binary_propagation(
            input_var, structure=D["structure"], output=D["output"],
            origin=D["origin"], mask=D["mask"], border_value=D["border_value"])
    elif self.__operation == "black_tophat":
        return ndimage.black_tophat(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "grey_dilation":
        return ndimage.grey_dilation(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], mode=D["mode"],
            cval=D["cval"], origin=D["origin"])
    elif self.__operation == "grey_closing":
        return ndimage.grey_closing(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "grey_erosion":
        return ndimage.grey_erosion(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "grey_opening":
        return ndimage.grey_opening(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "morphological_gradient":
        return ndimage.morphological_gradient(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "morphological_laplace":
        return ndimage.morphological_laplace(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "white_tophat":
        return ndimage.white_tophat(
            input_var, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif self.__operation == "multiply":
        return input_var * D["scalar"]
    else:
        return input_var  # no operation performed
def test_grey_morph():
    root = 'local_histos/'
    for datablock in get_data():
        data, x, y = datablock
        data = get_uint_image(data)
        print(x, y)
        # start at 4: a size-(0, 0) opening would raise in scipy.ndimage
        for i in range(4, 30, 4):
            mdata = nd.grey_opening(data, (i, i))
            R = mahotas.thresholding.rc(mdata)
            gradient = get_grad_mag(mdata)
            fig = plt.figure(figsize=(12, 12))
            ax1 = fig.add_subplot(211)
            ax1.imshow(gradient)
            ax1.set_title(str(i) + ', R: ' + str(R))
            ax2 = fig.add_subplot(212)
            ax2.hist(gradient.flatten(), 50)
            plt.savefig(get_fname([root + 'img', x, y, i, '.png']))
            plt.close(fig)
def forward(self, labels: Tensor, *args) -> Tensor:
    r"""Computes the Opening loss -- i.e. the MSE due to performing a
    greyscale opening operation.

    :param labels: Predicted class probabilities
    :param args: Extra inputs, in case user also provides input/output image values.
    :return: Opening loss
    """
    smooth_labels = labels.clone().detach().cpu().numpy()
    for i in range(labels.shape[0]):
        for j in range(labels.shape[1]):
            smooth_labels[i, j] = grey_opening(smooth_labels[i, j], self.radius)

    smooth_labels = torch.from_numpy(smooth_labels.astype(np.float32))
    if labels.device.type == 'cuda':
        smooth_labels = smooth_labels.cuda()

    return nn.MSELoss()(labels, smooth_labels.detach())
def epi_mask(in_file, out_file=None):
    """Use grayscale morphological operations to obtain a quick mask of EPI data."""
    from pathlib import Path
    import nibabel as nb
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball

    if out_file is None:
        out_file = Path("mask.nii.gz").absolute()

    img = nb.load(in_file)
    data = img.get_fdata(dtype="float32")

    # First open to blur out the skull around the brain
    opened = ndimage.grey_opening(data, structure=ball(3))
    # Second, close large vessels and the ventricles
    closed = ndimage.grey_closing(opened, structure=ball(2))

    # Window filter on percentile 30
    closed -= np.percentile(closed, 30)
    # Window filter on percentile 90 of data
    maxnorm = np.percentile(closed[closed > 0], 90)
    closed = np.clip(closed, a_min=0.0, a_max=maxnorm)

    # Calculate index of center of masses
    cm = tuple(np.round(ndimage.measurements.center_of_mass(closed)).astype(int))

    # Erode the picture of the brain by a lot
    eroded = ndimage.grey_erosion(closed, structure=ball(5))
    # Calculate the residual
    wshed = opened - eroded
    wshed -= wshed.min()
    wshed = np.round(1e3 * wshed / wshed.max()).astype(np.uint16)

    markers = np.zeros_like(wshed, dtype=int)
    markers[cm] = 2
    markers[0, 0, -1] = -1

    # Run watershed
    labels = ndimage.watershed_ift(wshed, markers)

    hdr = img.header.copy()
    hdr.set_data_dtype("uint8")
    nb.Nifti1Image(
        ndimage.binary_dilation(labels == 2, ball(2)).astype("uint8"),
        img.affine, hdr).to_filename(out_file)
    return out_file
def label_fusion(label, win=3):
    """Apply a morphological filtering on the label to remove isolated labels.
    In case the input is a two-channel label (2D ndarray of booleans of the
    same length), the labels of the two channels are fused to remove
    overlapping segments of speech.

    :param label: input labels given in a 1D or 2D ndarray
    :param win: parameter of the morphological filters
    """
    channel_nb = len(label)
    if channel_nb == 2:
        overlap_label = numpy.logical_and(label[0], label[1])
        label[0] = numpy.logical_and(label[0], ~overlap_label)
        label[1] = numpy.logical_and(label[1], ~overlap_label)

    for idx, lbl in enumerate(label):
        cl = ndimage.grey_closing(lbl, size=win)
        label[idx] = ndimage.grey_opening(cl, size=win)

    return label
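# Hedged 1-D demo (synthetic labels, not from the original source): the
# closing fills gaps shorter than the window and the following opening removes
# isolated spikes, which is the smoothing label_fusion relies on.
import numpy as np
from scipy import ndimage

lbl = np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0], dtype=np.uint8)
closed = ndimage.grey_closing(lbl, size=3)       # fills the one-frame gap at index 5
smoothed = ndimage.grey_opening(closed, size=3)  # drops the isolated frame at index 12
print(smoothed)  # [0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0]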
def detect_growth_markers(flow, wvd):
    wvd_diff_raw = flow.diff(wvd) / get_time_diff_from_coord(
        wvd.t)[:, np.newaxis, np.newaxis]

    wvd_diff_smoothed = filtered_tdiff(flow, wvd_diff_raw)

    s_struct = ndi.generate_binary_structure(2, 1)[np.newaxis, ...]
    wvd_diff_filtered = ndi.grey_opening(
        wvd_diff_smoothed, footprint=s_struct) * get_curvature_filter(wvd)

    watershed_markers = flow.label(wvd_diff_filtered >= 0.5)
    if isinstance(wvd, xr.DataArray):
        watershed_markers = filter_labels_by_length_and_mask(
            watershed_markers, wvd.data >= -5, 3)
    else:
        watershed_markers = filter_labels_by_length_and_mask(
            watershed_markers, wvd >= -5, 3)

    # marker_regions = flow.watershed(-wvd_diff_filtered,
    #                                 watershed_markers != 0,
    #                                 mask=wvd_diff_filtered < 0.25,
    #                                 structure=ndi.generate_binary_structure(3, 1))

    marker_labels = flow.label(
        ndi.binary_opening(wvd_diff_filtered >= 0.25, structure=s_struct))
    # marker_labels = flow.label(ndi.binary_opening(marker_regions, structure=s_struct))

    marker_labels = filter_labels_by_length_and_mask(marker_labels,
                                                     watershed_markers != 0, 3)
    if isinstance(wvd, xr.DataArray):
        marker_labels = filter_labels_by_length_and_mask(
            marker_labels, wvd.data >= -5, 3)
    else:
        marker_labels = filter_labels_by_length_and_mask(
            marker_labels, wvd >= -5, 3)

    if isinstance(wvd, xr.DataArray):
        wvd_diff_raw = xr.DataArray(wvd_diff_raw, wvd.coords, wvd.dims)
        marker_labels = xr.DataArray(marker_labels, wvd.coords, wvd.dims)

    return wvd_diff_smoothed, marker_labels
def grey_opening(img, params):
    if params['footprint_shape'] == 'rectangle':
        footprint = np.ones(
            (params['footprint_size_y'], params['footprint_size_x']), dtype=int)
    elif params['footprint_shape'] == 'ellipse':
        a = params['footprint_size_x'] / 2
        b = params['footprint_size_y'] / 2
        x, y = np.mgrid[-ceil(a):ceil(a) + 1, -ceil(b):ceil(b) + 1]
        footprint = ((x / a)**2 + (y / b)**2 < 1) * 1

    mode = params['mode']
    cval = params['cval']
    origin = params['origin']
    return ndimage.grey_opening(img, size=None, footprint=footprint,
                                structure=None, mode=mode, cval=cval,
                                origin=origin)
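# Hedged usage sketch for the wrapper above. The params keys are taken from
# the wrapper itself; the image, footprint sizes, and seed are illustrative
# assumptions (the wrapper's module is assumed to import np, ceil, ndimage).
import numpy as np

params = {
    'footprint_shape': 'ellipse',
    'footprint_size_x': 7,
    'footprint_size_y': 5,
    'mode': 'reflect',
    'cval': 0.0,
    'origin': 0,
}
img = np.random.default_rng(2).random((64, 64))
opened = grey_opening(img, params)   # calls the wrapper defined above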
def operationTask(input):
    D = operationArgumentDic
    #self.M.add_mem()
    if operation == "binary_closing":
        return ndimage.binary_closing(
            input, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif operation == "binary_dilation":
        return ndimage.binary_dilation(
            input, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif operation == "binary_erosion":
        return ndimage.binary_erosion(
            input, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif operation == "binary_fill_holes":
        return ndimage.binary_fill_holes(
            input, structure=D["structure"], output=D["output"],
            origin=D["origin"])
    elif operation == "binary_hit_or_miss":
        return ndimage.binary_hit_or_miss(
            input, structure1=D["structure1"], structure2=D["structure2"],
            output=D["output"], origin1=D["origin1"], origin2=D["origin2"])
    elif operation == "binary_opening":
        return ndimage.binary_opening(
            input, structure=D["structure"], iterations=D["iterations"],
            output=D["output"], origin=D["origin"], mask=D["mask"],
            border_value=D["border_value"], brute_force=D["brute_force"])
    elif operation == "binary_propagation":
        return ndimage.binary_propagation(
            input, structure=D["structure"], output=D["output"],
            origin=D["origin"], mask=D["mask"], border_value=D["border_value"])
    elif operation == "black_tophat":
        return ndimage.black_tophat(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "grey_dilation":
        return ndimage.grey_dilation(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], mode=D["mode"],
            cval=D["cval"], origin=D["origin"])
    elif operation == "grey_closing":
        return ndimage.grey_closing(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "grey_erosion":
        return ndimage.grey_erosion(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "grey_opening":
        return ndimage.grey_opening(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "morphological_gradient":
        return ndimage.morphological_gradient(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "morphological_laplace":
        return ndimage.morphological_laplace(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "white_tophat":
        return ndimage.white_tophat(
            input, structure=D["structure"], size=D["size"],
            footprint=D["footprint"], output=D["output"], origin=D["origin"],
            mode=D["mode"], cval=D["cval"])
    elif operation == "intMultiply":
        return input * D["scalar"]
    else:
        return input
def readImage(thigh, img):
    imgOr = cv2.imread(img, 0)
    imgTh = cv2.imread(img, 0)
    opening = ndimage.grey_opening(imgTh, size=(3, 4))
    gauss = cv2.GaussianBlur(opening, (11, 11), 0)
    imgTh[gauss < thigh] = 0
    imgTh[gauss >= thigh] = 1
    kernel = np.ones((1, 5))
    erode = cv2.erode(imgTh, kernel, iterations=1)
    kernel = np.ones((2, 15))
    dilate = cv2.dilate(erode, kernel, iterations=1)
    m.showImage6(imgOr, opening, gauss, imgTh, erode, dilate,
                 "Original", "Grey opening", "Gauss smooth", "Th",
                 "Erode", "Dilate")
    return imgOr, dilate
def morphop(im, operation='open', radius=5):
    """Perform a morphological operation with a spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g. 'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = morphology.disk(radius)
    elif im.ndim == 3:
        selem = morphology.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = ndi.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = ndi.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = ndi.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = ndi.grey_erosion(im, footprint=selem)
    return imout
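# Hedged usage sketch for morphop above (the arrays and seeds are
# illustrative; morphology/ndi are the skimage.morphology and scipy.ndimage
# aliases the function's module is assumed to import).
import numpy as np

vol = np.random.default_rng(3).random((20, 20, 20))
smoothed = morphop(vol, operation='open', radius=2)   # ball footprint in 3D

img2d = np.random.default_rng(4).random((64, 64))
closed = morphop(img2d, 'close', 1)                   # disk footprint in 2D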
def morphop(im, operation='open', radius=5):
    """Perform a morphological operation with a spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g. 'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = skmorph.disk(radius)
    elif im.ndim == 3:
        selem = skmorph.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = nd.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = nd.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = nd.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = nd.grey_erosion(im, footprint=selem)
    return imout
def run(self, image):
    """
    apply local otsu threshold to every z-stack and re-combine them
    image.shape = (x, y, z)
    """
    image = image.copy()
    image = np.moveaxis(image, -1, 0)
    result = []
    image *= 255
    image = image.astype(np.uint8)
    for i, stack in enumerate(image):
        local_otsu = filters.rank.otsu(stack, disk(self.radius))
        mask = stack > local_otsu
        stack = stack * mask
        if self.parameters['open_radius']:
            stack = ndimage.grey_opening(stack, self.parameters['open_radius'])
        result.append(stack)
    result = np.stack(result, axis=0)
    result = np.moveaxis(result, 0, -1)
    self.mask = result > 0
    self.image = self.apply_mask(result, self.mask)
    return self.image
def closingFilter(image, mask):
    """
    Applies an opening filter to an image, with the kernel size given by the
    mask parameter. (Note: despite the name, this performs an opening.)

    @param image must be a PIL.Image.
    @param mask string "rows x cols"
    @return matrix with the new values after applying the filter
    """
    threshold = 0.8
    sumColors = float(numpy.sum(image))
    isbinary = sumColors / image.size <= 1 - threshold
    (row, col) = [int(dim) for dim in mask.split('x')]
    structure = [[1 for i in range(col)] for j in range(row)]
    if isbinary:
        return ndimage.binary_opening(image, structure=structure)
    else:
        return ndimage.grey_opening(image, structure=structure)
def tophat_baseline(intensities):
    '''Perform "tophat" baseline removal, from the paper:
    Morphology-Based Automated Baseline Removal for Raman Spectra of
    Artistic Pigments. Perez-Pueyo et al., Appl. Spec. 2010'''
    # find the optimal window length
    old_b, num_equal = 0, 1
    for window_length in count(start=3, step=2):
        b1 = grey_opening(intensities, window_length)
        if np.allclose(b1, old_b):
            if num_equal == 2:
                break
            num_equal += 1
        else:
            num_equal = 1
        old_b = b1
    # use the smallest of the three equivalent window lengths
    window_length -= 4
    # compute another estimate of the baseline
    b2 = 0.5 * (grey_dilation(intensities, window_length) +
                grey_erosion(intensities, window_length))
    # combine the two estimates
    return np.minimum(b1, b2)
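# Hedged usage sketch for tophat_baseline above (synthetic spectrum; assumes
# grey_opening/grey_dilation/grey_erosion are the scipy.ndimage functions and
# count is itertools.count, as the original module's imports suggest). A flat
# offset plus one narrow peak is used so the window search stops quickly once
# the opening stabilizes.
import numpy as np
from itertools import count
from scipy.ndimage import grey_opening, grey_dilation, grey_erosion

x = np.linspace(0, 20, 400)
spectrum = 2.0 + np.exp(-(x - 8) ** 2 / 0.05)   # flat offset plus one narrow peak

baseline = tophat_baseline(spectrum)
corrected = spectrum - baseline                 # the peak survives; the offset is removed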
def opening(self, tissue, size=def_size):
    nd.grey_opening(self.P[tissue], size=[size, size, size],
                    output=self.P[tissue])
def opening(P, size=def_size):
    return nd.grey_opening(P, size=[size, size, size])
def opening(src, r=2):
    se = ball(r)
    result = nd.grey_opening(src, footprint=se)
    return result
             [1, 1, 1, 1, 1],
             [0, 1, 1, 1, 0]]
print(xorg, yorg, pres, xmax, ymax)

red = get_band_data(ds, 1)
green = get_band_data(ds, 2)
blue = get_band_data(ds, 3)

epsilon = 0.0001
norm_diff_ratio = (red - blue) / (epsilon + blue + red)
data1 = norm_diff_ratio
data1 = speckle_filter(data1, 'median', 11)
data1 = linear_stretch(data1, max_percentile=50.0)
data1 = ndimage.grey_opening(data1, size=(5, 5), structure=octagon_2)

thresh = threshold_otsu(data1, nbins=7)
print("otsu1", thresh)
data1[data1 >= thresh] = 0
data1[red == 0] = 0
#data1[data1 > 0] = 255
#thresh2 = threshold_otsu(data1, nbins=7)
#print("otsu2", thresh2)
#data1[data1 > thresh2] = 0

write_data(data1, outfileName, ds)

#norm_diff_ratio = (red - green) / (epsilon + green + red)
#data2 = linear_stretch(norm_diff_ratio)
#write_data(data2, outfileName2, ds)
def opening(src, r=2):
    """Use the opening algorithm to process the src image."""
    se = ball(r)
    result = nd.grey_opening(src, footprint=se)
    return result
def hand(self):
    base_img = self.output_4326
    in_img = os.path.join(self.hand_dir, HAND_FILE)
    out_img = self.hand_4326

    # Get a subset of HAND for particular tile
    #if self.force or not os.path.isfile(out_img):
    #print("generate hand subset:" + out_img + " from:" + in_img)
    self.generate_hand_subset(base_img, in_img, out_img)

    #if not os.path.isfile(self.hand_4326) and not self.force:
    #    cmd = "gdalwarp -of GTIFF " + out_img + " " + self.hand_4326
    #    print(cmd)
    #    err = os.system(cmd)
    #    print("Generating HAND Tif error:", err)
    #    #sys.exit(0)

    if os.path.isfile(self.output_4326_hand) and not self.force:
        return

    if verbose:
        print("Generating", self.output_4326_hand)

    src_ds = gdal.Open(self.output_4326_rgb)
    driver = gdal.GetDriverByName("GTiff")
    input_dataset = driver.CreateCopy(self.output_4326_hand, src_ds, 0,
                                      ['COMPRESS=DEFLATE'])

    input_band = input_dataset.GetRasterBand(1)
    input_data = input_band.ReadAsArray(0, 0, input_dataset.RasterXSize,
                                        input_dataset.RasterYSize)

    alpha_band = input_dataset.GetRasterBand(4)
    alpha_data = alpha_band.ReadAsArray(0, 0, input_dataset.RasterXSize,
                                        input_dataset.RasterYSize)

    hand_ds = gdal.Open(out_img)
    hand_band = hand_ds.GetRasterBand(1)
    hand_data = hand_band.ReadAsArray(0, 0, hand_ds.RasterXSize,
                                      hand_ds.RasterYSize)

    coastlines_ds = gdal.Open(self.coastlines)
    coastal_band = coastlines_ds.GetRasterBand(1)
    coastal_data = coastal_band.ReadAsArray(0, 0, coastlines_ds.RasterXSize,
                                            coastlines_ds.RasterYSize)

    if app.verbose:
        print("hand_data:", hand_data.min(), hand_data.max())

    # HAND masking
    mask = hand_data == 0
    input_data[mask] = 0
    mask = hand_data == 255
    input_data[mask] = 0
    mask = coastal_data > 0
    input_data[mask] = 0

    # Morphing to smooth and filter the data
    octagon_2 = [[0, 1, 1, 1, 0],
                 [1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1],
                 [0, 1, 1, 1, 0]]
    morphed = ndimage.grey_opening(input_data, size=(5, 5),
                                   structure=octagon_2)

    input_band.WriteArray(morphed, 0, 0)
    input_band.SetNoDataValue(0)

    # set transparency
    alpha_data[morphed < 255] = 0
    alpha_data[morphed >= 255] = 255
    alpha_band.WriteArray(alpha_data, 0, 0)

    input_data = None
    morphed = None
    input_dataset = None
    hand_band = None
    hand_ds = None
    src_ds = None
    coastlines_ds = None

    if app.verbose:
        print("Hand Morphed Done", self.output_4326_hand)
def opening(f, b=bm.create_structure_element_cross()):
    return mm.grey_opening(f, structure=b)
    unI = sorted(unique(im.ravel()))
    nbin = len(unI)
    h = histogram(im, unI)
    P = h[0].astype(float) / sum(h[0])
    w = cumsum(P)
    nbin = len(P)
    mu = cumsum(arange(1, nbin + 1) * P)
    sigma2B = (mu[-1] * w[1:-1] - mu[1:-1]) ** 2 / w[1:-1] / (1 - w[1:-1])
    idx = where(sigma2B == max(sigma2B))[0][0]
    return h[1][idx]

# ------------------------------------------------------------------------------
im = array(Image.open('textures/structure/015.jpg').convert("L")) / 255.
bg = ndimage.grey_opening(im, footprint=circle(5))

pb.set_cmap(pb.cm.gray)
pb.figure()
pb.title("Original")
pb.imshow(im, vmin=0, vmax=1)
pb.savefig('result/structure/001.png', dpi=150)

imbgadj = imadjust(im)
pb.figure()
pb.title("Normalized")
pb.imshow(imbgadj, vmin=0, vmax=1)
pb.savefig('result/structure/002.png', dpi=150)

t = otsu(imbgadj)
print(t)
def opening(f, b):
    return mm.grey_opening(f, structure=b)
def watershed_cube(self):
    writeVerbose = False;  #writeVerbose = self.dpWatershedTypes_verbose
    readVerbose = False;  #readVerbose = self.dpWatershedTypes_verbose

    # load the probability data, allocate as array of volumes instead of 4D ndarray to maintain C-order volumes
    probs = [None]*self.ntypes; bwseeds = [None]*self.nfg_types
    if self.srclabels:
        # this code path is typically not used in favor of the label checker for fully labeled 3d gt components.
        # but, some ground truth (for example, 2d ECS cases) was only labeled with voxel type,
        # so this is used to create ground truth components from the voxel types.
        loadh5 = emLabels.readLabels(srcfile=self.srclabels, chunk=self.chunk.tolist(),
            offset=self.offset.tolist(), size=self.size.tolist(), data_type='uint16', verbose=writeVerbose)
        self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
        # pre-allocate for srclabels method, labeled areas are set to prob of 1 below
        for i in range(self.ntypes):
            probs[i] = np.zeros(self.size, dtype=emProbabilities.PROBS_DTYPE, order='C')
        if self.TminSrc < 2:
            # simple method with no "cleaning"
            for i in range(self.ntypes):
                probs[i][loadh5.data_cube==i] = 1
        else:
            # optionally "clean" labels by removing small bg and fg components for each foreground type
            fgbwlabels = np.zeros(self.size, dtype=np.bool)
            for i in range(self.nfg_types):
                # background connected components and threshold
                comps, nlbls = nd.measurements.label(loadh5.data_cube!=i+1)
                comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                # foreground connected components and threshold
                comps, nlbls = nd.measurements.label(comps==0)
                comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                # keep track of mask for all foreground types
                bwlabels = (comps > 0); fgbwlabels = np.logical_or(fgbwlabels, bwlabels)
                probs[i+1][bwlabels] = 1
            # set background type as all areas that are not in foreground types after "cleaning"
            probs[0][np.logical_not(fgbwlabels)] = 1
    else:
        # check if background is in the prob file
        hdf = h5py.File(self.probfile,'r'); has_bg = self.bg_type in hdf; hdf.close()
        for i in range(0 if has_bg else 1, self.ntypes):
            loadh5 = dpLoadh5.readData(srcfile=self.probfile, dataset=self.types[i],
                chunk=self.chunk.tolist(), offset=self.offset.tolist(), size=self.size.tolist(),
                data_type=emProbabilities.PROBS_STR_DTYPE, verbose=readVerbose)
            self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
            probs[i] = loadh5.data_cube; del loadh5
        # if background was not in hdf5 then create it as 1-sum(fg type probs)
        if not has_bg:
            probs[0] = np.ones_like(probs[1])
            for i in range(1,self.ntypes):
                probs[0] -= probs[i]
            #assert( (probs[0] >= 0).all() ) # comment for speed
            probs[0][probs[0] < 0] = 0 # rectify

    # save some of the parameters as attributes
    self.attrs['types'] = self.types; self.attrs['fg_types'] = self.fg_types
    self.attrs['fg_types_labels'] = self.fg_types_labels

    # save connectivity structure and warping LUT because used on each iteration (for speed)
    self.bwconn = nd.morphology.generate_binary_structure(dpLoadh5.ND, self.connectivity)
    self.bwconn2d = self.bwconn[:,:,1]; self.simpleLUT = None

    # load the warpings if warping mode is enabled
    warps = None
    if self.warpfile:
        warps = [None]*self.nwarps
        for i in range(self.nwarps):
            loadh5 = dpLoadh5.readData(srcfile=self.warpfile, dataset=self.warp_datasets[i],
                chunk=self.chunk.tolist(), offset=self.offset.tolist(), size=self.size.tolist(),
                verbose=readVerbose)
            warps[i] = loadh5.data_cube; del loadh5

    # xxx - may need to revisit cropping, only intended to be used with warping method.
    if self.docrop: c = self.cropborder; s = self.size  # DO NOT use variables c or s below

    # optionally apply filters in attempt to fill small background (membrane) probability gaps.
    if self.close_bg > 0:
        # create structuring element
        n = 2*self.close_bg + 1; h = self.close_bg
        strel = np.zeros((n,n,n), dtype=np.bool); strel[h,h,h] = 1
        strel = nd.binary_dilation(strel, iterations=self.close_bg)

        # xxx - this was the only thing tried here that helped some but didn't work well against the skeletons
        probs[0] = nd.grey_closing(probs[0], structure=strel)
        for i in range(self.nfg_types):
            probs[i+1] = nd.grey_opening(probs[i+1], structure=strel)
        # xxx - this gave worse results
        #probs[0] = nd.maximum_filter(probs[0], footprint=strel)
        # xxx - this had almost no effect
        #probs[0] = nd.grey_closing(probs[0], structure=strel)

    # argmax produces the winner-take-all assignment for each supervoxel.
    # background type was put first, so voxType of zero is background (membrane).
    voxType = np.concatenate([x.reshape(x.shape + (1,)) for x in probs], axis=3).argmax(axis=3)

    # write out the winning type for each voxel
    # save some params from this watershed run in the attributes
    d = self.attrs.copy(); d['thresholds'] = self.Ts; d['Tmins'] = self.Tmins
    data = voxType.astype(emVoxelType.VOXTYPE_DTYPE)
    if self.docrop: data = data[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]
    emVoxelType.writeVoxType(outfile=self.outlabels, chunk=self.chunk.tolist(),
        offset=self.offset_crop.tolist(), size=self.size_crop.tolist(),
        datasize=self.datasize.tolist(), chunksize=self.chunksize.tolist(),
        verbose=writeVerbose, attrs=d, data=data)

    # only allow a voxel to be included in the type of component that had max prob for that voxel.
    # do this by setting the non-winning probabilities to zero.
    for i in range(self.ntypes): probs[i][voxType != i] = 0

    # create a type mask for each foreground type to select only current voxel type (winner-take-all from network)
    voxTypeSel = [None] * self.nfg_types; voxTypeNotSel = [None] * self.nfg_types
    for i in range(self.nfg_types):
        voxTypeSel[i] = (voxType == i+1)
        # create an inverted version, only used for complete fill not for warping (which requires C-contiguous),
        # so apply crop here if cropping enabled
        voxTypeNotSel[i] = np.logical_not(voxTypeSel[i])
        if self.docrop: voxTypeNotSel[i] = voxTypeNotSel[i][c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]

    # need C-contiguous probabilities for binary_warping.
    for i in range(self.nfg_types):
        if not probs[i+1].flags.contiguous or np.isfortran(probs[i+1]):
            probs[i+1] = np.ascontiguousarray(probs[i+1])

    # iteratively apply thresholds, each time only keeping components that have fallen under size Tmin.
    # at last iteration keep all remaining components.
    # do this separately for foreground types.
    for k in range(self.nTmin):
        for i in range(self.nfg_types):
            bwseeds[i] = np.zeros(self.size, dtype=np.bool, order='C')
        for i in range(self.nthresh):
            if self.dpWatershedTypes_verbose:
                print('creating supervoxels at threshold = %.8f with Tmin = %d' % (self.Ts[i],
                    self.Tmins[k]))
                t = time.time()
            types_labels = [None]*self.nfg_types; types_uclabels = [None]*self.nfg_types
            if self.skeletonize: types_sklabels = [None]*self.nfg_types
            types_nlabels = np.zeros((self.nfg_types,), dtype=np.int64)
            types_ucnlabels = np.zeros((self.nfg_types,), dtype=np.int64)
            for j in range(self.nfg_types):
                # run connected components at this threshold on labels
                labels, nlabels = nd.measurements.label(probs[j+1] > self.Ts[i], self.bwconn)
                # merge the current thresholded components with the previous seeds to get current bwlabels
                bwlabels = np.logical_or(labels, bwseeds[j])
                # take the current components under threshold and merge with the seeds for the next iteration
                if i < self.nthresh-1:
                    labels, sizes = emLabels.thresholdSizes(labels, minSize=-self.Tmins[k])
                    bwseeds[j] = np.logical_or(labels, bwseeds[j])
                # this if/elif switch determines the main method for creating the labels.
                # xxx - make cropping to be done in more efficient way, particular to avoid filling cropped areas
                if self.method == 'overlap':
                    # definite advantage to this method over other methods, but cost is about 2-3 times slower.
                    # labels are linked per zslice using precalculated slice to slice warpings based on the probs.
                    labels, nlabels = self.label_overlap(bwlabels, voxTypeSel[j], warps)
                    # xxx - add switches to only optionally export the unconnected labels
                    #uclabels = labels; ucnlabels = nlabels;
                    # crop right after the labels are created and stay uncropped from here.
                    # xxx - labels will be wrong unless method implicitly handled the cropping during the labeling.
                    # currently only the warping method is doing, don't need cropping for other methods anyways.
                    if self.docrop: labels = labels[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]
                    # this method can not create true unconnected 3d labels, but should be unconnected in 2d.
                    # NOTE: currently this only removes 6-connectivity, no matter what specified connectivity is
                    # xxx - some method of removing adjacencies with arbitrary connectivity?
                    uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                elif self.method == 'skim-ws':
                    # xxx - still trying to evaluate if there is any advantage to this more traditional watershed.
                    # it does not leave a non-adjacency boundary and is about 1.5 times slower than bwmorph

                    # run connected components on the thresholded labels merged with previous seeds
                    labels, nlabels = nd.measurements.label(bwlabels, self.bwconn)
                    # run a true watershed based the current foreground probs using current components as markers
                    labels = morph.watershed(probs[j+1], labels, connectivity=self.bwconn, mask=voxTypeSel[j])
                    # remove any adjacencies created during the watershed
                    # NOTE: currently this only removes 6-connectivity, no matter what specified connectivity is
                    # xxx - some method of removing adjacencies with arbitrary connectivity?
                    uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                else:
                    if self.method == 'comps-ws' and i>1:
                        # this is an alternative to the traditional watershed that warps out only based on stepping
                        # back through the thresholds in reverse order. has advantages of non-connectivity.
                        # may help slightly for small supervoxels but did not show much improved metrics in
                        # terms of large-scale connectivity (against skeletons)
                        # about 4-5 times slower than regular warping method.

                        # make an unconnected version of bwlabels by warping out but with mask only for this type
                        # everything above current threshold is already labeled, so only need to use gray thresholds
                        # starting below the current threshold level.
                        bwlabels, diff, self.simpleLUT = binary_warping(bwlabels,
                            np.ones(self.size, dtype=np.bool), mask=voxTypeSel[j], borderval=False,
                            slow=True, simpleLUT=self.simpleLUT, connectivity=self.connectivity,
                            gray=probs[j+1], grayThresholds=self.Ts[i-1::-1].astype(np.float32, order='C'))
                    else:
                        assert( self.method == 'comps' )  # bad method option
                        # make an unconnected version of bwlabels by warping out but with mask only for this type
                        bwlabels, diff, self.simpleLUT = binary_warping(bwlabels,
                            np.ones(self.size, dtype=np.bool), mask=voxTypeSel[j], borderval=False,
                            slow=True, simpleLUT=self.simpleLUT, connectivity=self.connectivity)

                    # run connected components on the thresholded labels merged with previous seeds (warped out)
                    uclabels, ucnlabels = nd.measurements.label(bwlabels, self.bwconn)
                    # in this case the normal labels are the same as the unconnected labels because of warping
                    labels = uclabels; nlabels = ucnlabels

                # optionally make a skeletonized version of the unconnected labels
                # xxx - revisit this, currently not being used for anything, started as a method to skeletonize GT
                if self.skeletonize:
                    # method to skeletonize using max range endpoints only
                    sklabels, sknlabels = emLabels.ucskeletonize(uclabels, mask=voxTypeSel[j],
                        sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)
                    assert( sknlabels == ucnlabels )

                # fill out these labels out so that they fill in remaining voxels based on voxType.
                # this uses bwdist method for finding nearest neighbors, so connectivity can be violated.
                # this is mitigated by first filling out background using the warping transformation
                # (or watershed) above, then this step is only to fill in remaining voxels for the
                # current foreground voxType.
                labels = emLabels.nearest_neighbor_fill(labels, mask=voxTypeNotSel[j],
                    sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)

                # save the components labels generated for this type
                types_labels[j] = labels.astype(emLabels.LBLS_DTYPE, copy=False)
                types_uclabels[j] = uclabels.astype(emLabels.LBLS_DTYPE, copy=False)
                types_nlabels[j] = nlabels if self.fg_types_labels[j] < 0 else 1
                types_ucnlabels[j] = ucnlabels if self.fg_types_labels[j] < 0 else 1
                if self.skeletonize: types_sklabels[j] = sklabels.astype(emLabels.LBLS_DTYPE, copy=False)

            # merge the fg components labels. they can not overlap because voxel type is winner-take-all.
            nlabels = 0; ucnlabels = 0
            labels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE)
            uclabels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE)
            if self.skeletonize: sklabels = np.zeros(self.size, dtype=emLabels.LBLS_DTYPE)
            for j in range(self.nfg_types):
                sel = (types_labels[j] > 0); ucsel = (types_uclabels[j] > 0)
                if self.skeletonize: sksel = (types_sklabels[j] > 0)
                if self.fg_types_labels[j] < 0:
                    labels[sel] += (types_labels[j][sel] + nlabels)
                    uclabels[ucsel] += (types_uclabels[j][ucsel] + ucnlabels)
                    if self.skeletonize: sklabels[sksel] += (types_sklabels[j][sksel] + ucnlabels)
                    nlabels += types_nlabels[j]; ucnlabels += types_ucnlabels[j]
                else:
                    labels[sel] = self.fg_types_labels[j]
                    uclabels[ucsel] = self.fg_types_labels[j]
                    if self.skeletonize: sklabels[sksel] = self.fg_types_labels[j]
                    nlabels += 1; ucnlabels += 1

            if self.dpWatershedTypes_verbose:
                print('\tnlabels = %d' % (nlabels,))
                #print('\tnlabels = %d %d' % (nlabels,labels.max())) # for debug only
                #assert(nlabels == labels.max()) # sanity check for non-overlapping voxTypeSel, comment for speed
                print('\tdone in %.4f s' % (time.time() - t,))

            # make a fully-filled out version using bwdist nearest foreground neighbor
            wlabels = emLabels.nearest_neighbor_fill(labels, mask=None,
                sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)

            # write out the results
            if self.nTmin == 1:
                subgroups = ['%.8f' % (self.Ts[i],)]
            else:
                subgroups = ['%d' % (self.Tmins[k],), '%.8f' % (self.Ts[i],)]
            d = self.attrs.copy(); d['threshold'] = self.Ts[i]
            d['types_nlabels'] = types_nlabels; d['Tmin'] = self.Tmins[k]
            emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                offset=self.offset_crop.tolist(), size=self.size_crop.tolist(),
                datasize=self.datasize.tolist(), chunksize=self.chunksize.tolist(),
                data=labels, verbose=writeVerbose, attrs=d, strbits=self.outlabelsbits,
                subgroups=['with_background']+subgroups )
            emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                offset=self.offset_crop.tolist(), size=self.size_crop.tolist(),
                datasize=self.datasize.tolist(), chunksize=self.chunksize.tolist(),
                data=wlabels, verbose=writeVerbose, attrs=d, strbits=self.outlabelsbits,
                subgroups=['zero_background']+subgroups )
            d['type_nlabels'] = types_ucnlabels
            emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                offset=self.offset_crop.tolist(), size=self.size_crop.tolist(),
                datasize=self.datasize.tolist(), chunksize=self.chunksize.tolist(),
                data=uclabels, verbose=writeVerbose, attrs=d, strbits=self.outlabelsbits,
                subgroups=['no_adjacencies']+subgroups )
            if self.skeletonize:
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(),
                    datasize=self.datasize.tolist(), chunksize=self.chunksize.tolist(),
                    data=sklabels, verbose=writeVerbose, attrs=d, strbits=self.outlabelsbits,
                    subgroups=['skeletonized']+subgroups )