Example #1
def _apply_closing(nr, footprint, three_d):
    if three_d:
        pancake = np.swapaxes(np.tile(footprint, (3, 1, 1)), 0, -1)
        nr = 1.0*grey_closing(nr, footprint=pancake)
    else:
        nt = nr.shape[2]
        for i in range(0, nt):
            nr[:, :, i] = 1.0*grey_closing(nr[:, :, i], footprint=footprint)
    return nr
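A minimal usage sketch for the helper above; the volume, footprint, and shapes are illustrative stand-ins, not values from the original project:

import numpy as np
from scipy.ndimage import grey_closing

volume = np.random.rand(64, 64, 10)      # (ny, nx, nt) image stack
footprint = np.ones((3, 3), dtype=bool)  # 2D structuring element
closed_3d = _apply_closing(volume.copy(), footprint, three_d=True)
closed_2d = _apply_closing(volume.copy(), footprint, three_d=False)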
Example #2
def make_mask(img, display=False):
    # for noise removal
    eroded = morphology.erosion(img, np.ones([3, 3]))
    dilation = morphology.dilation(eroded, np.ones([3, 3]))
    # Filters for further enhancement
    result = ndimage.grey_closing(dilation, size=2)
    result = ndimage.median_filter(result, size=3)
    result = ndimage.gaussian_filter(result, 2)
    result = ndimage.grey_closing(result, size=5)
    return result
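A hedged sketch of how make_mask might be invoked, assuming morphology is skimage.morphology and ndimage is scipy.ndimage, as the calls suggest; the input image is synthetic:

import numpy as np
from skimage import morphology
from scipy import ndimage

img = np.random.rand(128, 128)
mask = make_mask(img)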
Example #3
 def test_closing(self):
     self.image.closing(3)
     original_image = retina_grayscale.Retina_grayscale(
         None, _image_path, 1)
     assert_array_equal(
         self.image.np_image,
         ndimage.grey_closing(original_image.np_image, size=(3, 3)))
Example #4
    def grey_closing(im, niters=3):
        """
        Apply grey closing operation to image.

        Args:

            im (np.ndarray(np.float32)) - pixel values. Shape may be (X,Y), (X,Y,3), or (N,X,Y,3) depending on image type.

            niters (int) - number of grey closing iterations

        Returns

            im (np.ndarray(np.float32))

        """

        # get filter size
        shape_to_size = {2: 3, 3: (3, 3, 1), 4: (1, 3, 3, 1)}
        filter_size = shape_to_size[len(im.shape)]

        # apply grey closing
        for _ in range(niters):
            im = grey_closing(im, size=filter_size)

        return im
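The shape_to_size trick above can also be applied directly with SciPy. In this sketch (with a made-up batch array), size=(1, 3, 3, 1) closes each frame spatially without mixing frames (axis 0) or colour channels (axis 3):

import numpy as np
from scipy.ndimage import grey_closing

batch = np.random.rand(4, 64, 64, 3).astype(np.float32)  # (N, X, Y, 3)
closed = grey_closing(batch, size=(1, 3, 3, 1))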
Example #5
def close_image(image_filtered):
    """close_image example ndimage.grey_closing
    """
    closed = ndimage.grey_closing(np.abs(image_filtered), size=(5, 5, 5))
    new_image = nib.Nifti1Image(normalise(closed), affine)
    new_image.set_data_dtype(np.float32)
    nib.save(new_image, 'image_fill.nii.gz')
Example #6
def test_depth_algorithm(image_filtered, basename='depth'):
    """test_depth_algorithm
    Depth algorithm testing
    """
    print "Testing depth algorithm"
    # t1 = time.time()

    # Close gaussian filtered image
    c = ndimage.grey_closing(np.abs(image_filtered), size=(5, 5, 5))
    # Mask closed image
    # cm = c * (c>8000).astype(float)
    cm = c / (ndimage.maximum(c))
    # avoid div by zero
    cm = 0.99 * cm + 0.00001
    # Regularise gaussian filtered image
    gm = (np.abs(image_filtered) / cm)  # * (c>8000).astype(float)
    # Depth = difference between closed image and regularised gaussian
    depth = c / ndimage.maximum(c) - gm / ndimage.maximum(gm)
    # mask regularised image
    depth = depth * (c > 0.00015).astype(float)
    # Normalise
    # depth = (depth -
    # ndimage.minimum(depth))/(ndimage.maximum(depth)-ndimage.minimum(depth))
    # save to nifti
    new_image = nib.Nifti1Image(np.abs(depth), affine)
    new_image.set_data_dtype(np.float32)
    nib.save(new_image, basename + '.nii.gz')
Example #7
def grey_processing(inputImg):
#    fp = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    fp = np.ones((3, 3))
    data = nd.median_filter(inputImg, size=7)
    data = nd.grey_closing(data, footprint=fp)
    data = nd.grey_opening(data,footprint=fp)
    return data
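A minimal call of the filter chain above; nd is assumed to be scipy.ndimage, as the snippet's usage implies, and the input is synthetic:

import numpy as np
import scipy.ndimage as nd

noisy = np.random.rand(256, 256)
smoothed = grey_processing(noisy)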
Example #8
def PredictOnImg(img_path):
    img_origin = imread(img_path)  # (H, W)
    img_origin = img_origin / 255

    # estimate 'background' color by a median filter
    background = signal.medfilt2d(img_origin, 11)
    #save('background.png', background)

    # compute 'foreground' mask as anything that is significantly darker than
    # the background
    foreground = img_origin < background - 0.1
    #save('foreground_mask.png', foreground)
    back = np.average(background)

    # Let's remove some splattered ink
    mod = ndimage.filters.median_filter(foreground, 2)
    mod = ndimage.grey_closing(mod, size=(2, 2))

    # either return foreground or the average of the background

    img_prediction = np.where(mod, img_origin, back)  ## 1 is pure white

    img_prediction = (img_prediction * 255).astype(np.uint8)
    background = (background * 255).astype(np.uint8)
    foreground = (foreground * 255).astype(np.uint8)

    return img_prediction, background, foreground
Example #9
def MMF(Y,rn,bg):
    stru_len_op = int(bg)
    stru_len_clo = int(bg*1.5)
    
    stru_ele_op = np.linspace(0,0,stru_len_op)
    stru_ele_clo = np.linspace(0,0,stru_len_clo)
    
    #triangular wave
    tri_wave = []
    amp = 1.0
    width = 1
    samp = rn
    asym = 0.5
    points = 1
    
    while points <= samp:
    
        Xi = 0.1*points
        if 0 <= Xi and Xi <= width*asym:
            tri_wave.append(amp*Xi/(width*asym))
        elif Xi > width*asym and Xi <width:
            tri_wave.append(amp*(width-Xi)/(width*(1-asym)))
        else:
            tri_wave.append(0)
        points += 1

    #low-pass
    op_flat = nd.grey_opening(Y,size = (stru_len_op),structure = stru_ele_op)
    clo_flat = nd.grey_closing(op_flat,size = (stru_len_clo),structure = stru_ele_clo)
    
    reducing = []
    for reduce in range(len(Y)):
        reducing.append(Y[reduce] - clo_flat[reduce])
    
    op_tri = nd.grey_opening(reducing,size = (rn),structure = tri_wave)
    clo_tri = nd.grey_closing(reducing,size = (rn),structure = tri_wave)

    after_stru_ele =np.linspace(0,0,rn) 
    
    op_than_clo = nd.grey_closing(op_tri,size = (rn),structure = after_stru_ele)
    clo_than_op = nd.grey_opening(clo_tri,size = (rn),structure = after_stru_ele)

    plusing = []
    for plus in range(len(op_than_clo)):
        plusing.append((op_than_clo[plus]+clo_than_op[plus])/2.0)
    
    return plusing, clo_flat
Example #10
def run_FreeCAD_ImageT(self):

    from scipy import ndimage
    fn = self.getData('image')
    import matplotlib.image as mpimg

    img = mpimg.imread(fn)
    (sa, sb, sc) = img.shape
    red = 0.005 * (self.getData("red") + 100)
    green = 0.005 * (self.getData("green") + 100)
    blue = 0.005 * (self.getData("blue") + 100)
    #blue=0
    say("rgb", red, green, blue)

    # other filters
    #img = ndimage.sobel(img)
    #img = ndimage.laplace(img)

    im2 = img[:, :, 0] * red + img[:, :, 1] * green + img[:, :, 2] * blue
    im2 = np.round(im2)

    if self.getData('invert'):
        im2 = 1 - im2

    #im2 = ndimage.sobel(im2)

    ss = int((self.getData('maskSize') + 100) / 20)
    say("ss", ss)
    if ss != 0:
        mode = self.getData('mode')
        say("mode", mode)
        if mode == 'closing':
            im2 = ndimage.grey_closing(im2, size=(ss, ss))
        elif mode == 'opening':
            im2 = ndimage.grey_opening(im2, size=(ss, ss))
        elif mode == 'erosion':
            im2 = ndimage.grey_erosion(im2, size=(ss, ss))
        elif mode == 'dilitation':
            im2 = ndimage.grey_dilation(im2, footprint=np.ones((ss, ss)))
        else:
            say("NO MODE")

    nonzes = np.where(im2 == 0)
    pts = [
        FreeCAD.Vector(sb + -x, sa - y)
        for y, x in np.array(nonzes).swapaxes(0, 1)
    ]

    h = 10
    pts = [
        FreeCAD.Vector(
            sb + -x, sa - y,
            (red * img[y, x, 0] + green * img[y, x, 1] + blue * img[y, x, 2]) *
            h) for y, x in np.array(nonzes).swapaxes(0, 1)
    ]
    colors = [img[y, x] for y, x in np.array(nonzes).swapaxes(0, 1)]
    say("len pts", len(pts))
    self.setData("Points_out", pts)
Example #11
 def closing(self, size_structure):
     """
     erodes and dilates the stored image, by default the structure is a cross
     :param size_structure: size of kernel to apply in the filter
     """
     self._copy()
     self.np_image = ndimage.grey_closing(self.np_image,
                                          size=(size_structure,
                                                size_structure))
Example #12
def apply(array, **kwargs):
    """
    Apply a set of standard filters to array data:
    
    Call: apply(array-data, <list of key=value arguments>)

    The key-value pairs define the filtering to be done and should be given in
    the order to be processed. Possible key-value pairs are:
    
      * smooth:  gaussian filtering, value is the sigma parameter (scalar or tuple)
      * uniform: uniform  filtering (2)
      * max:     maximum  filtering (1)
      * min:     minimum  filtering (1)
      * median:  median   filtering (1)
      
      * dilate: grey dilatation (1)
      * erode:  grey erosion    (1)
      * close:  grey closing    (1)
      * open:   grey opening    (1)
      
      * linear_map: call linear_map(), value is the tuple (min,max)   (3)
      * normalize:  call normalize(),  value is the method            (3)
      * adaptive:   call adaptive(),   value is the sigma             (3)
      * adaptive_:  call adaptive(),   with uniform kernel            (3)
          
    The filtering is done using standard scipy.ndimage functions.
    
    (1) The value given (to the key) is the width of the filter: 
        the distance from the center pixel (the size of the filter is thus 2*value+1)
        The neighborhood is an (approximated) boolean circle (up to discretization)
    (2) Same as (1) but the neighborhood is a complete square
    (3) See doc of respective function
    """
    for key in kwargs:
        value = kwargs[key]
        if key in ('max','min','median','dilate','erode','open','close'):
            fp = _kernel.distance(array.ndim*(2*value+1,))<=value  # circular filter
            
        if   key=='smooth' : array = _nd.gaussian_filter(array, sigma=value)
        elif key=='uniform': array = _nd.uniform_filter( array, size=2*value+1)
        elif key=='max'    : array = _nd.maximum_filter( array, footprint=fp)
        elif key=='min'    : array = _nd.minimum_filter( array, footprint=fp)
        elif key=='median' : array = _nd.median_filter(  array, footprint=fp)

        elif key=='dilate' : array = _nd.grey_dilation(  array, footprint=fp)
        elif key=='erode'  : array = _nd.grey_erosion(   array, footprint=fp)
        elif key=='open'   : array = _nd.grey_opening(   array, footprint=fp)
        elif key=='close'  : array = _nd.grey_closing(   array, footprint=fp)
        
        elif key=='linear_map': array = linear_map(array, min=value[0], max=value[1])
        elif key=='normalize' : array = normalize( array, method = value)
        elif key=='adaptive'  : array = adaptive(  array, sigma  = value, kernel='gaussian')
        elif key=='adaptive_' : array = adaptive(  array, sigma  = value, kernel='uniform')
        else: 
            print('\033[031mUnrecognized filter:', key)
            
    return array
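A possible call of apply(); the array is synthetic, the module's own _kernel helper is assumed to be importable, and since the keyword order drives the processing order this relies on Python 3.7+ preserving **kwargs order (smooth first, then close):

import numpy as np

data = np.random.rand(128, 128)
out = apply(data, smooth=2, close=1)  # gaussian sigma=2, then closing with a radius-1 circular footprint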
Example #13
	def global_threshold(self, im_co, vis_pix, args): #do global thresholding to isolate slow fibers
		co_pix = np.array(im_co.getdata(),dtype=np.uint8)
		co_pix = co_pix*vis_pix
		T_otsu = mh.otsu(co_pix.reshape(im_co.size[1],im_co.size[0]))
		thresholded_copix = (co_pix*(co_pix > T_otsu))
		#thresholded_copix = si.grey_erosion(np.array(thresholded_copix).reshape(im_co.size[1],im_co.size[0]), size=(3,3))
		thresholded_copix = si.grey_closing(np.array(thresholded_copix).reshape(im_co.size[1],im_co.size[0]), size=(10,10))
		#thresholded_copix = si.grey_closing(thresholded_copix, size=(3,3))
		return thresholded_copix
Example #14
    def __call__(self, 
                 img: np.ndarray, 
                 mode: Optional[str]=None,
                 radius: Optional[int]=None,
                 binary: Optional[bool]=None) -> np.ndarray:
        """
        Apply the transform to `img`.

        """
        self.mode = self.mode if mode is None else mode
        self.radius = self.radius if radius is None else radius
        self.binary = self.binary if binary is None else binary

        input_ndim = img.squeeze().ndim # spatial ndim
        if input_ndim == 2:
            structure = ndi.generate_binary_structure(2, 1)
        elif input_ndim == 3:
            structure = ndi.generate_binary_structure(3, 1)
        else:
            raise ValueError('Only 2D and 3D data are currently supported')
        
        channel_dim = None
        if input_ndim != img.ndim:
            channel_dim = img.shape.index(1)
            img = img.squeeze()

        if self.mode == 'closing':
            if self.binary:
                img = ndi.binary_closing(img, structure=structure, iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_closing(img, footprint=structure)        
        elif self.mode == 'dilation':
            if self.binary:
                img = ndi.binary_dilation(img, structure=structure, iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_dilation(img, footprint=structure)
        elif self.mode == 'erosion':
            if self.binary:
                img = ndi.binary_erosion(img, structure=structure, iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_erosion(img, footprint=structure)
        elif self.mode == 'opening':
            if self.binary:
                img = ndi.binary_opening(img, structure=structure, iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_opening(img, footprint=structure)
        else:
            raise ValueError(f'Unexpected keyword {self.mode}')
        
        if channel_dim is not None:
            return np.expand_dims(img, axis=channel_dim)
        else:
            return img
Example #15
def make_outer_surface(filled_file, output_surface_file, se_diameter=15):
    if os.path.isfile(output_surface_file):
        log.info('Dural surface mesh %s already exists' %
                 os.path.basename(output_surface_file))
        return
    # read MRI
    volume = nbfs.MGHImage.from_filename(filled_file)
    volume = volume.get_data()

    # change elements from {0,1} to {0,255}
    volume *= 255

    # Gaussian filter (sigma=1mm)
    gaussian_volume = scimage.gaussian_filter(
        volume, 1, mode='constant')  # Is this correct?

    # Binarize filtered image
    avg = gaussian_volume.mean()
    gaussian_volume[gaussian_volume > avg] = 255
    gaussian_volume[gaussian_volume < avg] = 0

    # Morphological closing:

    # Construct structuring element
    xx, yy = np.meshgrid(list(range(-1 * se_diameter + 1, se_diameter)),
                         list(range(-1 * se_diameter + 1, se_diameter)))
    se = (xx**2 + yy**2) < se_diameter**2

    # Take closing

    closed_volume = np.stack([
        scimage.grey_closing(
            gaussian_volume[..., i], structure=se, mode='constant')
        for i in range(gaussian_volume.shape[-1])
    ], axis=-1)

    # Binarize closed image
    thresh = closed_volume.max() / 2
    closed_volume[closed_volume <= thresh] = 0
    closed_volume[closed_volume > thresh] = 255

    # vertices,faces = isosurface(*,100)
    vertices, faces, _, _ = measure.marching_cubes(closed_volume, 100)

    # Reorient
    v2 = np.zeros(vertices.shape)
    v2[:, 0] = 129 - vertices[:, 0]
    v2[:, 1] = vertices[:, 2] - 129
    v2[:, 2] = 129 - vertices[:, 1]

    vertices = v2

    # Write geometry file
    nbfs.write_geometry(output_surface_file, vertices, faces)
Example #16
	def __test_grey_closing_operation(self,input_var):
		print("\n grey_closing Voxel testing...")
		start_time = t.time()
		v_output = vc.grey_closing(input_var,structure=structure,no_of_blocks=PL[0],fakeghost=PL[1],make_float32=False)
		print("grey_closing Voxel testing time taken: ",(t.time() - start_time)," sec")
		#print("\n grey_closing Default testing...")
		start_time = t.time()
		d_output = ndimage.grey_closing(input_var,structure=structure)
		print("grey_closing Default testing time taken: ",(t.time() - start_time)," sec")		
		msgs = "grey_closing_operation_FAIL_with parameters: ",PL
		self.assertTrue((d_output==v_output).all(), msg=msgs)
Example #17
def rescale(image, label, input_space, output_space=(0.39, 0.39, 0.39)):
    assert image.shape == label.shape, "image shape:{} != label shape{}".format(
        image.shape, label.shape)
    zoom_factor = tuple([input_space[i] / output_space[i] for i in range(3)])
    # image cubic interpolation
    image_rescale = zoom(image, zoom_factor, order=3)
    # label nearest interpolation
    label_rescale = zoom(label, zoom_factor, order=0)
    label_rescale = grey_closing(label_rescale, size=(5, 5, 5))

    return image_rescale, label_rescale
Example #18
def find_number_of_columns(sum_image):
    closed = snd.grey_closing(sum_image, 11)
    otsu = cvw.threshold_otsu(closed)
    dilated_otsu = cvw.dilate(otsu, 3)

    dilated = dilated_otsu[0]  # 1-D
    clipped, *_ = clip(dilated)
    diffed = np.diff(clipped)
    num_changes = np.count_nonzero(diffed)
    num_columns = (num_changes + 2) // 2

    return num_columns
Example #19
def denoise_im_with_back(inp):
    bg = signal.medfilt2d(inp, 11)
    save('background.png', bg)
    mask = inp < bg - 0.1
    save('foreground_mask.png', mask)
    back = np.average(bg)

    mod = ndimage.filters.median_filter(mask, 2)
    mod = ndimage.grey_closing(mod, size=(2, 2))

    out = np.where(mod, inp, back)
    return out
Example #20
def test_2d_ndimage_equivalence():
    image = np.zeros((9, 9), np.uint8)
    image[2:-2, 2:-2] = 128
    image[3:-3, 3:-3] = 196
    image[4, 4] = 255

    opened = gray.opening(image)
    closed = gray.closing(image)

    footprint = ndi.generate_binary_structure(2, 1)
    ndimage_opened = ndi.grey_opening(image, footprint=footprint)
    ndimage_closed = ndi.grey_closing(image, footprint=footprint)

    assert_array_equal(opened, ndimage_opened)
    assert_array_equal(closed, ndimage_closed)
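For reference, the cross-shaped footprint both libraries are compared against looks like this:

import scipy.ndimage as ndi

print(ndi.generate_binary_structure(2, 1).astype(int))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]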
Example #21
def test_2d_ndimage_equivalence():
    image = np.zeros((9, 9), np.uint8)
    image[2:-2, 2:-2] = 128
    image[3:-3, 3:-3] = 196
    image[4, 4] = 255

    opened = grey.opening(image)
    closed = grey.closing(image)

    selem = ndi.generate_binary_structure(2, 1)
    ndimage_opened = ndi.grey_opening(image, footprint=selem)
    ndimage_closed = ndi.grey_closing(image, footprint=selem)

    testing.assert_array_equal(opened, ndimage_opened)
    testing.assert_array_equal(closed, ndimage_closed)
Example #22
def rotate_center(image, label, angle=15, ax=0):

    assert image.shape == label.shape

    if angle < 1:
        return image, label

    axes = tuple({0, 1, 2}.difference({ax}))
    # image cubic interpolation
    img = rotate(image, angle, axes=axes, reshape=False, order=3)
    # label nearest interpolation
    lbl = rotate(label, angle, axes=axes, reshape=False, order=0)
    lbl = grey_closing(lbl, size=(5, 5, 5))

    return img, lbl
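A sketch of calling rotate_center on synthetic volumes; rotate and grey_closing are assumed to come from scipy.ndimage, matching the snippet's usage:

import numpy as np
from scipy.ndimage import rotate, grey_closing

image = np.random.rand(32, 32, 32)
label = (np.random.rand(32, 32, 32) > 0.5).astype(np.uint8)
img_r, lbl_r = rotate_center(image, label, angle=10, ax=0)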
Example #23
	def __operationTask(self,input_var):
		'''
		Perform the respective morphological operation on the input block.

		Parameters
		----------
		input_var : 3d numpy array, the ith block.

		Returns
		-------
		output : 3d array, the result of the operation on the ith block.
		'''
		
		
		D=self.__operationArgumentDic
		if self.__operation=="binary_closing":	
			return ndimage.binary_closing(input_var, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
		elif self.__operation=="binary_dilation":
			return ndimage.binary_dilation(input_var, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
		elif self.__operation=="binary_erosion":
			return ndimage.binary_erosion(input_var, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
		elif self.__operation=="binary_fill_holes": #the output might be different then scipy.ndimage  
			return ndimage.binary_fill_holes(input_var, structure=D["structure"],output=D["output"], origin=D["origin"])
		elif self.__operation=="binary_hit_or_miss":
			return ndimage.binary_hit_or_miss(input_var, structure1=D["structure1"],structure2=D["structure2"],output=D["output"], origin1=D["origin1"], origin2=D["origin2"])
		elif self.__operation=="binary_opening":
			return ndimage.binary_opening(input_var, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
		elif self.__operation=="binary_propagation":			
			return ndimage.binary_propagation(input_var, structure=D["structure"],output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"])
		elif self.__operation=="black_tophat":
			return ndimage.black_tophat(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="grey_dilation":
			return ndimage.grey_dilation(input_var, structure=D["structure"],size=D["size"], footprint=D["footprint"],output=D["output"], mode=D["mode"], cval=D["cval"], origin=D["origin"])			
		elif self.__operation=="grey_closing":
			return ndimage.grey_closing(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="grey_erosion":
			return ndimage.grey_erosion(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="grey_opening":
			return ndimage.grey_opening(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="morphological_gradient":
			return ndimage.morphological_gradient(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="morphological_laplace":
			return ndimage.morphological_laplace(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="white_tophat":
			return ndimage.white_tophat(input_var, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
		elif self.__operation=="multiply":
			return input_var*D["scalar"]		
		else:
			return input_var # no operation performed....
Example #24
def preprocess(img):
    b, g, r = cv2.split(img)
    gray = rgb2Red(img)
    gray_blur = cv2.GaussianBlur(gray, (5, 5), 0)
    gray = cv2.addWeighted(gray, 1.5, gray_blur, -0.5, 0, gray)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    gray = ndimage.grey_closing(gray, structure=kernel)
    gray = cv2.equalizeHist(gray)
    #gray = cv2.GaussianBlur(gray, (5,5), 0)
    #gray = cv2.medianBlur(gray,9)

    #gray_la= ndimage.laplace(gray)
    #gray = cv2.GaussianBlur(gray,(3,3),0)
    #gray_eq = cv2.equalizeHist(gray)

    return gray
Example #25
def denoise_im_with_back(inp):
    # estimate 'background' color by a median filter
    bg = signal.medfilt2d(inp, 11)
    save('background.png', bg)

    # compute 'foreground' mask as anything that is significantly darker than
    # the background
    mask = inp < bg - 0.1
    save('foreground_mask.png', mask)
    back = np.average(bg)

    mod = ndimage.filters.median_filter(mask, 2)
    mod = ndimage.grey_closing(mod, size=(2, 2))

    # either return foreground or the average of the background

    out = np.where(mod, inp, back)  ## 1 is pure white
    return out
Example #26
def epi_mask(in_file, out_file=None):
    """Use grayscale morphological operations to obtain a quick mask of EPI data."""
    from pathlib import Path
    import nibabel as nb
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball

    if out_file is None:
        out_file = Path("mask.nii.gz").absolute()

    img = nb.load(in_file)
    data = img.get_fdata(dtype="float32")
    # First open to blur out the skull around the brain
    opened = ndimage.grey_opening(data, structure=ball(3))
    # Second, close large vessels and the ventricles
    closed = ndimage.grey_closing(opened, structure=ball(2))

    # Window filter on percentile 30
    closed -= np.percentile(closed, 30)
    # Window filter on percentile 90 of data
    maxnorm = np.percentile(closed[closed > 0], 90)
    closed = np.clip(closed, a_min=0.0, a_max=maxnorm)
    # Calculate index of center of masses
    cm = tuple(
        np.round(ndimage.measurements.center_of_mass(closed)).astype(int))
    # Erode the picture of the brain by a lot
    eroded = ndimage.grey_erosion(closed, structure=ball(5))
    # Calculate the residual
    wshed = opened - eroded
    wshed -= wshed.min()
    wshed = np.round(1e3 * wshed / wshed.max()).astype(np.uint16)
    markers = np.zeros_like(wshed, dtype=int)
    markers[cm] = 2
    markers[0, 0, -1] = -1
    # Run watershed
    labels = ndimage.watershed_ift(wshed, markers)

    hdr = img.header.copy()
    hdr.set_data_dtype("uint8")
    nb.Nifti1Image(
        ndimage.binary_dilation(labels == 2, ball(2)).astype("uint8"),
        img.affine, hdr).to_filename(out_file)
    return out_file
Example #27
def denoise_im_with_back(inp):
    # estimate 'background' color by a median filter
    bg = signal.medfilt2d(inp, 11)
    save('background.png', bg)

    # compute 'foreground' mask as anything that is significantly darker than
    # the background
    mask = inp < bg - 0.1    
    save('foreground_mask.png', mask)
    back = np.average(bg)

    # Let's remove some splattered ink
    mod = ndimage.filters.median_filter(mask, 2)
    mod = ndimage.grey_closing(mod, size=(2, 2))

    # either return foreground or the average of the background

    out = np.where(mod, inp, back)  ## 1 is pure white
    return out
Example #28
def closing_filter(Data, size=3):
    """Use grey closing to create or modify a filtering mask.

    Input can be either unfiltered data or a filtering mask.
    Data -- data array or a boolean filtering mask
    size -- size of the closing footprint (an integer); the footprint is a
            size*size square

    Output is a filtering mask (a boolean array, where True means a
    meteorological value).
    """
    # Check the input type
    if Data.dtype != 'bool':
        hcmask = Data > 1
    else:
        hcmask = Data
    footprint = np.ones((size, size))
    hcmaski = ndimage.grey_closing(hcmask, footprint=footprint, mode='constant')
    return hcmaski  # boolean array
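An illustrative call, assuming Data is a radar-style array where values above 1 count as meteorological (per the docstring) and that numpy/scipy.ndimage are imported as np/ndimage in the function's module:

import numpy as np
from scipy import ndimage

Data = np.random.rand(100, 100) * 10
mask = closing_filter(Data, size=3)  # boolean mask after grey closing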
Example #29
def label_fusion(label, win=3):
    """Apply a morphological filtering on the label to remove isolated labels.
    In case the input is a two-channel label (a 2D ndarray of booleans of the
    same length), the labels of the two channels are fused to remove
    overlapping segments of speech.
    
    :param label: input labels given in a 1D or 2D ndarray
    :param win: size parameter of the morphological filters
    """
    channel_nb = len(label)
    if channel_nb == 2:
        overlap_label = numpy.logical_and(label[0], label[1])
        label[0] = numpy.logical_and(label[0], ~overlap_label)
        label[1] = numpy.logical_and(label[1], ~overlap_label)

    for idx, lbl in enumerate(label):
        cl = ndimage.grey_closing(lbl, size=win)
        label[idx] = ndimage.grey_opening(cl, size=win)

    return label
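A minimal sketch with a synthetic two-channel boolean label of the kind the docstring describes; numpy and scipy.ndimage match the snippet's usage:

import numpy
from scipy import ndimage

label = numpy.zeros((2, 100), dtype=bool)
label[0, 10:40] = True
label[1, 35:70] = True  # overlaps channel 0 on 35:40
fused = label_fusion(label, win=3)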
Example #30
def morphop(im, operation='open', radius=5):
    """Perform a morphological operation with spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g.
        'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = skmorph.disk(radius)
    elif im.ndim == 3:
        selem = skmorph.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = nd.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = nd.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = nd.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = nd.grey_erosion(im, footprint=selem)
    return imout
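An example call; 'close' matches the 'clos' prefix test above, and the image is synthetic:

import numpy as np

im = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
out = morphop(im, operation='close', radius=3)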
Example #31
def grey_closing(img, params):
    if params['footprint_shape'] == 'rectangle':
        footprint = np.ones(
            (params['footprint_size_y'], params['footprint_size_x']),
            dtype=int)
    elif params['footprint_shape'] == 'ellipse':
        a = params['footprint_size_x'] / 2
        b = params['footprint_size_y'] / 2
        x, y = np.mgrid[-ceil(a):ceil(a) + 1, -ceil(b):ceil(b) + 1]
        footprint = ((x / a)**2 + (y / b)**2 < 1) * 1

    mode = params['mode']
    cval = params['cval']
    origin = params['origin']

    return ndimage.grey_closing(img,
                                size=None,
                                footprint=footprint,
                                structure=None,
                                mode=mode,
                                cval=cval,
                                origin=origin)
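A hypothetical parameter dictionary for the wrapper above; the key names are taken from the function body, while the values and the input image are arbitrary (the wrapper itself assumes numpy as np, math.ceil, and scipy.ndimage in scope):

import numpy as np

img = np.random.rand(100, 100)
params = {
    'footprint_shape': 'ellipse',
    'footprint_size_x': 7,
    'footprint_size_y': 5,
    'mode': 'reflect',
    'cval': 0.0,
    'origin': 0,
}
result = grey_closing(img, params)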
Example #32
def operationTask(input):
	D=operationArgumentDic
	#self.M.add_mem()#.....................................................................................................
	
	if operation=="binary_closing":	
		return ndimage.binary_closing(input, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
	elif operation=="binary_dilation":
		return ndimage.binary_dilation(input, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
	elif operation=="binary_erosion":
		return ndimage.binary_erosion(input, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
	elif operation=="binary_fill_holes":
		return ndimage.binary_fill_holes(input, structure=D["structure"],output=D["output"], origin=D["origin"])
	elif operation=="binary_hit_or_miss":
		return ndimage.binary_hit_or_miss(input, structure1=D["structure1"],structure2=D["structure2"],output=D["output"], origin1=D["origin1"], origin2=D["origin2"])
	elif operation=="binary_opening":
		return ndimage.binary_opening(input, structure=D["structure"], iterations=D["iterations"], output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"], brute_force=D["brute_force"])
	elif operation=="binary_propagation":			
		return ndimage.binary_propagation(input, structure=D["structure"],output=D["output"], origin=D["origin"], mask=D["mask"], border_value=D["border_value"])
	elif operation=="black_tophat":
		return ndimage.black_tophat(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="grey_dilation":
		return ndimage.grey_dilation(input, structure=D["structure"],size=D["size"], footprint=D["footprint"],output=D["output"], mode=D["mode"], cval=D["cval"], origin=D["origin"])
	elif operation=="grey_closing":
		return ndimage.grey_closing(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="grey_erosion":
		return ndimage.grey_erosion(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="grey_opening":
		return ndimage.grey_opening(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="morphological_gradient":
		return ndimage.morphological_gradient(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="morphological_laplace":
		return ndimage.morphological_laplace(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="white_tophat":
		return ndimage.white_tophat(input, structure=D["structure"], size=D["size"], footprint=D["footprint"],  output=D["output"], origin=D["origin"],mode=D["mode"], cval=D["cval"])
	elif operation=="intMultiply":
		return input*D["scalar"]
	
	else:
		return input
Example #33
def morphop(im, operation='open', radius=5):
    """Perform a morphological operation with spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g.
        'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = morphology.disk(radius)
    elif im.ndim == 3:
        selem = morphology.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = ndi.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = ndi.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = ndi.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = ndi.grey_erosion(im, footprint=selem)
    return imout
Example #34
def openingFilter(image, mask):
    """ Aplica o filtro de fechamento em uma imagem,
    de acordo com o tamanho da máscara passada por
    parâmetro.
    
    @param image deve ser um PIL.Image.
    @param mask string "row x cols"

    @return matriz com novos valores após aplicação do filtro
    """

    threshold = 0.8
    sumColors = float(numpy.sum(image))
    isbinary = sumColors / image.size <= 1 - threshold

    
    (row, col) = [int(dim) for dim in mask.split('x')]
    structure = [[1 for i in range(col)] for j in range(row)]

    if isbinary:
        return ndimage.binary_closing(image, structure=structure)
    else:
        return ndimage.grey_closing(image, structure=structure)
Example #35
def Closing_test():
    """
    This doesn't really work ;(
    """
    model = 1.0 - LineModel([0.2, 0.2], [0.8, 0.4], h=1.0, sig=0.0025)
    close = grey_closing(model, size=(4,4))
    
    fig = plt.figure()
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(212)

    ax1.imshow(data.T, origin='image', cmap=plt.cm.gray, interpolation='nearest')
    ax2.imshow(model.T, origin='image', cmap=plt.cm.gray, interpolation='nearest')
    diff = data - model

    ax3.imshow(diff.T, origin='image', cmap=plt.cm.gray, interpolation='nearest')

    ax1.set_title('data')
    ax2.set_title('model')
    ax3.set_title('diff')

    plt.show()
Example #36
 def closing(self, tissue, size=def_size):
     nd.grey_closing(self.P[tissue], size=[size,size,size], output=self.P[tissue]) 
Example #37
    if return_downsized: return diff
    x, y = np.ogrid[:xs, :ys]
    return diff[x // blocksizex, y // blocksizey]
    
def thresholding_on_gaussian_mean(img, params):
    img_ubyte = img_as_float(img.astype(np.uint8))
    gaussian = gaussian_filter(img_ubyte, params.get('gaussian_sigma',3))
    threshold1 = np.mean(gaussian)
    return img_ubyte*(gaussian>threshold1)
    
params = {'blocksize':(10,10), 'min_blob_size':2, 'max_blob_fraction':1/3.,'pixel_mask':np.nan,'gaussian_sigma':3}

pipeline_otsu = [lambda x, params: x*(x>0), # take advantage that additions on the blackboard are white
             lambda x, params: x*(x>threshold_otsu(x)), 
             lambda x, params: abs(downsample(x, params['blocksize'], return_downsized=True)),
             lambda x, params: ndimage.grey_closing(x, size=(2, 2), structure=np.ones((2,2))), # make blobs more regular
             #lambda x, params: x > params['threshold']
            ]
pipeline_gaussian = [lambda x, params: x*(x>0), # take advantage that additions on the blackboard are white
             thresholding_on_gaussian_mean, 
             lambda x, params: abs(downsample(x, params['blocksize'], return_downsized=True)),
             lambda x, params: ndimage.grey_closing(x, size=(2, 2), structure=np.ones((2,2))), # make blobs more regular
            ]

def frame_selection(img_processed, blobs, nblobs, params): 
    if nblobs < 1: return False
    return not np.any([blobs[blobs==i].size > img_processed.size * params['max_blob_fraction'] for i in range(1, nblobs+1)]) 

def blob_selection(blobs, nb, params):
    return blobs[blobs==nb].size > params['min_blob_size'] # only select blob larger than n (downsampled-)pixels
    
Example #38
def closing(P, size=def_size):
    return nd.grey_closing(P, size=[size,size,size]) 
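Sketch of a call; def_size is defined elsewhere in the source project, so the size is passed explicitly here, and nd is assumed to be scipy.ndimage:

import numpy as np
import scipy.ndimage as nd

P = np.random.rand(32, 32, 32)
closed = closing(P, size=3)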
Example #39
def processing(mc, radii=[[11, 11]*u.degree],
               clip_limit=None,
               histogram_clip=[0.0, 99.],
               func=np.sqrt,
               develop=None):
    """
    Image processing steps used to isolate the EUV wave from the data.  Use
    this part of AWARE to perform the image processing steps that segment
    propagating features that brighten new pixels as they propagate.

    Parameters
    ----------

    mc : sunpy.map.MapCube
    radii : list of lists. Each list contains a pair of numbers that describe
        the radius of the median filter and of the closing operation.
    histogram_clip : pair of percentiles used to derive the clip range when
        clip_limit is None.
    clip_limit : explicit (low, high) clip range; when None it is computed
        from histogram_clip.
    func : function applied to the data array (e.g. np.sqrt) before clipping.
    develop : optional dict of output path prefixes ('img', 'dat') used to
        dump intermediate movies and pickles.

    """

    # Define the disks that will be used on all the images.
    # The first disk in each pair is the disk that is used by the median
    # filter.  The second disk is used by the morphological closing
    # operation.
    disks = []
    for r in radii:
        e1 = (r[0]/mc[0].scale.x).to('pixel').value  # median circle radius - across wavefront
        e3 = (r[1]/mc[0].scale.x).to('pixel').value  # closing circle width - across wavefront
        disks.append([disk(e1), disk(e3)])

    # For the dump images
    rstring = ''
    for r in radii:
        z = '%i_%i__' % (r[0].value, r[1].value)
        rstring += z

    # Calculate the persistence
    new = mapcube_tools.persistence(mc)
    if develop is not None:
        develop_filepaths = {}
        filename = develop['img'] + '_persistence_mc.mp4'
        print('\nWriting persistence movie to {:s}'.format(filename))
        aware_utils.write_movie(new, filename)

        filename = develop['dat'] + '_persistence_mc.pkl'
        develop_filepaths['persistence_mc'] = filename
        print('\nWriting persistence mapcube to {:s}'.format(filename))
        f = open(filename, 'wb')
        pickle.dump(new, f)
        f.close()

    # Calculate the running difference
    new = mapcube_tools.running_difference(new)
    if develop is not None:
        filename = develop['img'] + '_rdpi_mc.mp4'
        print('\nWriting RDPI movie to {:s}'.format(filename))
        aware_utils.write_movie(new, filename)

        filename = develop['dat'] + '_rdpi_mc.pkl'
        develop_filepaths['rdpi_mc'] = filename
        print('\nWriting RDPI mapcube to {:s}'.format(filename))
        f = open(filename, 'wb')
        pickle.dump(new, f)
        f.close()

    # Storage for the processed mapcube.
    new_mc = []

    # Only want positive differences, so everything lower than zero
    # should be set to zero
    mc_data = func(new.as_array())
    mc_data[mc_data < 0.0] = 0.0

    # Clip the data to be within a range, and then normalize it.
    if clip_limit is None:
        cl = np.nanpercentile(mc_data, histogram_clip)
    else:
        cl = clip_limit
    mc_data[mc_data > cl[1]] = cl[1]
    mc_data = (mc_data - cl[0]) / (cl[1]-cl[0])

    # Get rid of NaNs
    nans_here = np.logical_not(np.isfinite(mc_data))
    nans_replaced = deepcopy(mc_data)
    nans_replaced[nans_here] = 0.0

    # Clean the data to isolate the wave front.  Use three dimensional
    # operations from scipy.ndimage.  This approach should get rid of
    # more noise and have better continuity in the time-direction.
    final = np.zeros_like(mc_data, dtype=np.float32)

    # Do the cleaning and isolation operations on multiple length-scales,
    # and add up the final results.
    for j, d in enumerate(disks):
        pancake = np.swapaxes(np.tile(d[0], (3, 1, 1)), 0, -1)
        nr = deepcopy(nans_replaced)

        print('\n', nr.shape, pancake.shape, '\n', 'started median filter.')
        nr = 1.0*median_filter(nr, footprint=pancake)
        if develop is not None:
            filename = develop['dat'] + '_np_median_dc_{:n}.npy'.format(j)
            develop_filepaths['np_median_dc'] = filename
            print('\nWriting results of median filter to {:s}'.format(filename))
            f = open(filename, 'wb')
            np.save(f, nr)
            f.close()

        print(' started grey closing.')
        nr = 1.0*grey_closing(nr, footprint=pancake)
        if develop is not None:
            filename = develop['dat'] + '_np_closing_dc_{:n}.npy'.format(j)
            develop_filepaths['np_closing_dc'] = filename
            print('\nWriting results of closing to {:s}'.format(filename))
            f = open(filename, 'wb')
            np.save(f, nr)
            f.close()

        # Sum the contributions over all length-scales
        final += nr*1.0

    # If in development mode, now dump out the meta's and the nans
    if develop:
        filename = develop['dat'] + '_np_meta.pkl'
        develop_filepaths['np_meta'] = filename
        print('\nWriting all meta data information to {:s}'.format(filename))
        f = open(filename, 'wb')
        pickle.dump(mc.all_meta(), f)
        f.close()
        filename = develop['dat'] + '_np_nans.npy'
        develop_filepaths['np_nans'] = filename
        print('\nWriting all nans to {:s}'.format(filename))
        f = open(filename, 'wb')
        np.save(f, nans_here)
        f.close()

    # Create the list that will be turned in to a mapcube
    for i, m in enumerate(new):
        new_mc.append(Map(ma.masked_array(final[:, :, i],
                                          mask=nans_here[:, :, i]),
                          m.meta))

    # Return the cleaned mapcube
    if develop:
        return Map(new_mc, cube=True), develop_filepaths
    else:
        return Map(new_mc, cube=True)
Example #40
def closing(f, b=bm.create_structure_element_cross()):
    return mm.grey_closing(f, structure=b)
Example #41
    def watershed_cube(self):
        writeVerbose = False;
        #writeVerbose = self.dpWatershedTypes_verbose
        readVerbose = False;
        #readVerbose = self.dpWatershedTypes_verbose

        # load the probability data, allocate as array of volumes instead of 4D ndarray to maintain C-order volumes
        probs = [None]*self.ntypes; bwseeds = [None]*self.nfg_types
        if self.srclabels:
            # this code path is typically not used in favor of the label checker for fully labeled 3d gt components.
            # but, some ground truth (for example, 2d ECS cases) was only labeled with voxel type,
            #   so this is used to create ground truth components from the voxel types.
            loadh5 = emLabels.readLabels(srcfile=self.srclabels, chunk=self.chunk.tolist(), offset=self.offset.tolist(),
                size=self.size.tolist(), data_type='uint16', verbose=writeVerbose)
            self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
            # pre-allocate for srclabels method, labeled areas are set to prob of 1 below
            for i in range(self.ntypes): probs[i] = np.zeros(self.size, dtype=emProbabilities.PROBS_DTYPE, order='C')
            if self.TminSrc < 2:
                # simple method with no "cleaning"
                for i in range(self.ntypes): probs[i][loadh5.data_cube==i] = 1
            else:
                # optionally "clean" labels by removing small bg and fg components for each foreground type
                fgbwlabels = np.zeros(self.size, dtype=bool)
                for i in range(self.nfg_types):
                    # background connected components and threshold
                    comps, nlbls = nd.measurements.label(loadh5.data_cube!=i+1)
                    comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                    # foreground connected components and threshold
                    comps, nlbls = nd.measurements.label(comps==0)
                    comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                    # keep track of mask for all foreground types
                    bwlabels = (comps > 0); fgbwlabels = np.logical_or(fgbwlabels, bwlabels)
                    probs[i+1][bwlabels] = 1
                # set background type as all areas that are not in foreground types after "cleaning"
                probs[0][np.logical_not(fgbwlabels)] = 1
        else:
            # check if background is in the prob file
            hdf = h5py.File(self.probfile,'r'); has_bg = self.bg_type in hdf; hdf.close()
            for i in range(0 if has_bg else 1, self.ntypes):
                loadh5 = dpLoadh5.readData(srcfile=self.probfile, dataset=self.types[i], chunk=self.chunk.tolist(),
                    offset=self.offset.tolist(), size=self.size.tolist(), data_type=emProbabilities.PROBS_STR_DTYPE,
                    verbose=readVerbose)
                self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
                probs[i] = loadh5.data_cube; del loadh5
            # if background was not in hdf5 then create it as 1-sum(fg type probs)
            if not has_bg:
                probs[0] = np.ones_like(probs[1])
                for i in range(1,self.ntypes): probs[0] -= probs[i]
                #assert( (probs[0] >= 0).all() ) # comment for speed
                probs[0][probs[0] < 0] = 0 # rectify

        # save some of the parameters as attributes
        self.attrs['types'] = self.types; self.attrs['fg_types'] = self.fg_types
        self.attrs['fg_types_labels'] = self.fg_types_labels

        # save connectivity structure and warping LUT because they are used on each iteration (for speed)
        self.bwconn = nd.morphology.generate_binary_structure(dpLoadh5.ND, self.connectivity)
        self.bwconn2d = self.bwconn[:,:,1]; self.simpleLUT = None

        # load the warpings if warping mode is enabled
        warps = None
        if self.warpfile:
            warps = [None]*self.nwarps
            for i in range(self.nwarps):
                loadh5 = dpLoadh5.readData(srcfile=self.warpfile, dataset=self.warp_datasets[i],
                    chunk=self.chunk.tolist(), offset=self.offset.tolist(), size=self.size.tolist(),
                    verbose=readVerbose)
                warps[i] = loadh5.data_cube; del loadh5

        # xxx - may need to revisit cropping, only intended to be used with warping method.
        if self.docrop: c = self.cropborder; s = self.size  # DO NOT use variables c or s below

        # optionally apply filters in attempt to fill small background (membrane) probability gaps.
        if self.close_bg > 0:
            # create structuring element
            n = 2*self.close_bg + 1; h = self.close_bg; strel = np.zeros((n,n,n),dtype=bool); strel[h,h,h]=1;
            strel = nd.binary_dilation(strel,iterations=self.close_bg)

            # xxx - this was the only thing tried here that helped some but didn't work well against the skeletons
            probs[0] = nd.grey_closing( probs[0], structure=strel )
            for i in range(self.nfg_types): probs[i+1] = nd.grey_opening( probs[i+1], structure=strel )
            # xxx - this gave worse results
            #probs[0] = nd.maximum_filter( probs[0], footprint=strel )
            # xxx - this had almost no effect
            #probs[0] = nd.grey_closing( probs[0], structure=strel )

        # argmax produces the winner-take-all assignment for each supervoxel.
        # background type was put first, so voxType of zero is background (membrane).
        voxType = np.concatenate([x.reshape(x.shape + (1,)) for x in probs], axis=3).argmax(axis=3)
        # write out the winning type for each voxel
        # save some params from this watershed run in the attributes
        d = self.attrs.copy(); d['thresholds'] = self.Ts; d['Tmins'] = self.Tmins
        data = voxType.astype(emVoxelType.VOXTYPE_DTYPE)
        if self.docrop: data = data[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]
        emVoxelType.writeVoxType(outfile=self.outlabels, chunk=self.chunk.tolist(),
            offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
            chunksize=self.chunksize.tolist(), verbose=writeVerbose, attrs=d,
            data=data)

        # only allow a voxel to be included in the type of component that had max prob for that voxel.
        # do this by setting the non-winning probabilities to zero.
        for i in range(self.ntypes): probs[i][voxType != i] = 0;

        # create a type mask for each foreground type to select only current voxel type (winner-take-all from network)
        voxTypeSel = [None] * self.nfg_types; voxTypeNotSel =  [None] * self.nfg_types
        for i in range(self.nfg_types):
            voxTypeSel[i] = (voxType == i+1)
            # create an inverted version, only used for complete fill not for warping (which requires C-contiguous),
            #   so apply crop here if cropping enabled
            voxTypeNotSel[i] = np.logical_not(voxTypeSel[i])
            if self.docrop: voxTypeNotSel[i] = voxTypeNotSel[i][c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]

        # need C-contiguous probabilities for binary_warping.
        for i in range(self.nfg_types):
            if not probs[i+1].flags.contiguous or np.isfortran(probs[i+1]):
                probs[i+1] = np.ascontiguousarray(probs[i+1])

        # iteratively apply thresholds, each time only keeping components that have fallen under size Tmin.
        # at last iteration keep all remaining components.
        # do this separately for foreground types.
        for k in range(self.nTmin):
            for i in range(self.nfg_types): bwseeds[i] = np.zeros(self.size, dtype=bool, order='C')
            for i in range(self.nthresh):
                if self.dpWatershedTypes_verbose:
                    print('creating supervoxels at threshold = %.8f with Tmin = %d' % (self.Ts[i], self.Tmins[k]))
                    t = time.time()
                types_labels = [None]*self.nfg_types; types_uclabels = [None]*self.nfg_types;
                if self.skeletonize: types_sklabels = [None]*self.nfg_types
                types_nlabels = np.zeros((self.nfg_types,),dtype=np.int64)
                types_ucnlabels = np.zeros((self.nfg_types,),dtype=np.int64)
                for j in range(self.nfg_types):
                    # run connected components at this threshold on labels
                    labels, nlabels = nd.measurements.label(probs[j+1] > self.Ts[i], self.bwconn)

                    # merge the current thresholded components with the previous seeds to get current bwlabels
                    bwlabels = np.logical_or(labels, bwseeds[j])

                    # take the current components under threshold and merge with the seeds for the next iteration
                    if i < self.nthresh-1:
                        labels, sizes = emLabels.thresholdSizes(labels, minSize=-self.Tmins[k])
                        bwseeds[j] = np.logical_or(labels, bwseeds[j])

                    # this if/elif switch determines the main method for creating the labels.
                    # xxx - make cropping to be done in more efficient way, particular to avoid filling cropped areas
                    if self.method == 'overlap':
                        # definite advantage to this method over other methods, but cost is about 2-3 times slower.
                        # labels are linked per zslice using precalculated slice to slice warpings based on the probs.
                        labels, nlabels = self.label_overlap(bwlabels, voxTypeSel[j], warps)

                        # xxx - add switches to only optionally export the unconnected labels
                        #uclabels = labels; ucnlabels = nlabels;

                        # crop right after the labels are created and stay uncropped from here.
                        # xxx - labels will be wrong unless method implicitly handled the cropping during the labeling.
                        #   currently only the warping method is doing, don't need cropping for other methods anyways.
                        if self.docrop: labels = labels[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]

                        # this method can not create true unconnected 3d labels, but should be unconnected in 2d.
                        # NOTE: currently this only removes 6-connectivity, no matter what connectivity is specified
                        # xxx - some method of removing adjacencies with arbitrary connectivity?
                        uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                    elif self.method == 'skim-ws':
                        # xxx - still trying to evaluate if there is any advantage to this more traditional watershed.
                        #   it does not leave a non-adjacency boundary and is about 1.5 times slower than bwmorph

                        # run connected components on the thresholded labels merged with previous seeds
                        labels, nlabels = nd.measurements.label(bwlabels, self.bwconn)

                        # run a true watershed based the current foreground probs using current components as markers
                        labels = morph.watershed(probs[j+1], labels, connectivity=self.bwconn, mask=voxTypeSel[j])

                        # remove any adjacencies created during the watershed
                        # NOTE: currently this only removes 6-connectivity, no matter what connectivity is specified
                        # xxx - some method of removing adjacencies with arbitrary connectivity?
                        uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                    else:
                        if self.method == 'comps-ws' and i>1:
                            # this is an alternative to the traditional watershed that warps out only based on stepping
                            #   back through the thresholds in reverse order. has advantages of non-connectivity.
                            # may help slightly for small supervoxels but did not show much improved metrics in
                            #   terms of large-scale connectivity (against skeletons)
                            # about 4-5 times slower than regular warping method.

                            # make an unconnected version of bwlabels by warping out but with mask only for this type
                            # everything above current threshold is already labeled, so only need to use gray thresholds
                            #    starting below the current threshold level.
                            bwlabels, diff, self.simpleLUT = binary_warping(bwlabels, np.ones(self.size,dtype=bool),
                                mask=voxTypeSel[j], borderval=False, slow=True, simpleLUT=self.simpleLUT,
                                connectivity=self.connectivity, gray=probs[j+1],
                                grayThresholds=self.Ts[i-1::-1].astype(np.float32, order='C'))
                        else:
                            assert( self.method == 'comps' )     # bad method option
                            # make an unconnected version of bwlabels by warping out but with mask only for this type
                            bwlabels, diff, self.simpleLUT = binary_warping(bwlabels, np.ones(self.size,dtype=bool),
                                mask=voxTypeSel[j], borderval=False, slow=True, simpleLUT=self.simpleLUT,
                                connectivity=self.connectivity)

                        # run connected components on the thresholded labels merged with previous seeds (warped out)
                        uclabels, ucnlabels = nd.measurements.label(bwlabels, self.bwconn);

                        # in this case the normal labels are the same as the unconnected labels because of warping
                        labels = uclabels; nlabels = ucnlabels;

                    # optionally make a skeletonized version of the unconnected labels
                    # xxx - revisit this, currently not being used for anything, started as a method to skeletonize GT
                    if self.skeletonize:
                        # method to skeletonize using max range endpoints only
                        sklabels, sknlabels = emLabels.ucskeletonize(uclabels, mask=voxTypeSel[j],
                            sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)
                        assert( sknlabels == ucnlabels )

                    # fill these labels out so that they cover the remaining voxels based on voxType.
                    # this uses the bwdist method for finding nearest neighbors, so connectivity can be violated.
                    # this is mitigated by first filling out the background using the warping transformation
                    #   (or watershed) above, so this step only fills in the remaining voxels for the
                    #   current foreground voxType.
                    labels = emLabels.nearest_neighbor_fill(labels, mask=voxTypeNotSel[j],
                        sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)

                    # save the components labels generated for this type
                    types_labels[j] = labels.astype(emLabels.LBLS_DTYPE, copy=False);
                    types_uclabels[j] = uclabels.astype(emLabels.LBLS_DTYPE, copy=False);
                    types_nlabels[j] = nlabels if self.fg_types_labels[j] < 0 else 1
                    types_ucnlabels[j] = ucnlabels if self.fg_types_labels[j] < 0 else 1
                    if self.skeletonize: types_sklabels[j] = sklabels.astype(emLabels.LBLS_DTYPE, copy=False)

                # merge the fg components labels. they cannot overlap because voxel type is winner-take-all.
                nlabels = 0; ucnlabels = 0;
                labels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE);
                uclabels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE);
                if self.skeletonize: sklabels = np.zeros(self.size, dtype=emLabels.LBLS_DTYPE);
                for j in range(self.nfg_types):
                    sel = (types_labels[j] > 0); ucsel = (types_uclabels[j] > 0);
                    if self.skeletonize: sksel = (types_sklabels[j] > 0);
                    if self.fg_types_labels[j] < 0:
                        labels[sel] += (types_labels[j][sel] + nlabels);
                        uclabels[ucsel] += (types_uclabels[j][ucsel] + ucnlabels);
                        if self.skeletonize: sklabels[sksel] += (types_sklabels[j][sksel] + ucnlabels);
                        nlabels += types_nlabels[j]; ucnlabels += types_ucnlabels[j];
                    else:
                        labels[sel] = self.fg_types_labels[j];
                        uclabels[ucsel] = self.fg_types_labels[j];
                        if self.skeletonize: sklabels[sksel] = self.fg_types_labels[j]
                        nlabels += 1; ucnlabels += 1;

                if self.dpWatershedTypes_verbose:
                    print('\tnlabels = %d' % (nlabels,))
                    #print('\tnlabels = %d %d' % (nlabels,labels.max())) # for debug only
                    #assert(nlabels == labels.max()) # sanity check for non-overlapping voxTypeSel, comment for speed
                    print('\tdone in %.4f s' % (time.time() - t,))

                # make a fully-filled out version using bwdist nearest foreground neighbor
                wlabels = emLabels.nearest_neighbor_fill(labels, mask=None,
                    sampling=self.attrs['scale'] if hasattr(self.attrs,'scale') else None)

                # write out the results
                if self.nTmin == 1: subgroups = ['%.8f' % (self.Ts[i],)]
                else: subgroups = ['%d' % (self.Tmins[k],), '%.8f' % (self.Ts[i],)]
                d = self.attrs.copy(); d['threshold'] = self.Ts[i];
                d['types_nlabels'] = types_nlabels; d['Tmin'] = self.Tmins[k]
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=labels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['with_background']+subgroups )
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=wlabels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['zero_background']+subgroups )
                d['type_nlabels'] = types_ucnlabels;
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=uclabels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['no_adjacencies']+subgroups )
                if self.skeletonize:
                    emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                        offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                        chunksize=self.chunksize.tolist(), data=sklabels, verbose=writeVerbose,
                        attrs=d, strbits=self.outlabelsbits, subgroups=['skeletonized']+subgroups )
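
# A minimal sketch of the bwdist-style nearest-neighbor fill that emLabels.nearest_neighbor_fill
# is described as using above; the helper name here is hypothetical and the real method's mask
# handling is omitted, so this only illustrates the core idea with scipy.
import numpy as np
from scipy import ndimage

def nn_fill_sketch(labels, sampling=None):
    # distance_transform_edt measures the distance to the nearest zero element, so mark
    # labeled voxels as zero; the returned index arrays then point at the nearest labeled
    # voxel for every position, and indexing the label volume with them fills the gaps.
    inds = ndimage.distance_transform_edt(labels == 0, sampling=sampling,
                                          return_distances=False, return_indices=True)
    return labels[tuple(inds)]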
Example #42
0
# End of Function Definitions for CLAHE ---------------------------------------

#apply CLAHE to the median-filtered L channel for increased contrast before data analysis
l_chan_clahe = equalize_adapthist(l_chan_med, ntiles_x=8, ntiles_y=8, clip_limit=0.01,
                       nbins=256)
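
#note: newer scikit-image releases replaced the ntiles_x/ntiles_y arguments with kernel_size;
#  an equivalent call on those versions (assuming an 8x8 tiling of the image) would be:
#l_chan_clahe = equalize_adapthist(l_chan_med,
#    kernel_size=(l_chan_med.shape[0] // 8, l_chan_med.shape[1] // 8),
#    clip_limit=0.01, nbins=256)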

#show fully pre-processed image
#list of variables for reference: rgb, lab, l_chan1, l_chan_med, l_chan_clahe
skimage.io.imshow(l_chan_clahe)


#start removal of the optic disk (the bright circular region that the blood vessels branch out from)

#apply grayscale morphological closing 
l_close = ndi.grey_closing(l_chan1, size = (10,10))

skimage.io.imshow(l_close)

#binarize by thresholding to try and segment optic disk
"""
I'm trying to work out here how to perform the binary threshold and invert/overlay
the output marker image - however, my plotted histograms come back all white, and
neither Otsu nor adaptive thresholding gives me what I want. Can you help me generate
output similar to figure 3(b) from the Sopharak paper? Thanks.
"""


#image = l_close

#block_size = 1000
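
#one possible answer to the thresholding question above: Otsu on the closed channel, with the
#  bright side kept as the optic-disk marker. this is only a sketch -- whether it reproduces
#  figure 3(b) from the Sopharak paper depends on the input image.
from skimage.filters import threshold_otsu

otsu_thresh = threshold_otsu(l_close)
disk_marker = l_close > otsu_thresh  #bright candidate region for the optic disk
skimage.io.imshow(disk_marker.astype(float))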
def closing(f, b):
    return mm.grey_closing(f, structure=b)
Example #44
0
def local_minima(array2d):
    return ((array2d <= np.roll(array2d,  1, 0)) &
            (array2d <= np.roll(array2d, -1, 0)) &
            (array2d <= np.roll(array2d,  1, 1)) &
            (array2d <= np.roll(array2d, -1, 1)))
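
# quick check of local_minima on a tiny synthetic array (hypothetical data, not part of the
# original script); note that np.roll wraps around, so the array borders behave periodically.
_demo = np.array([[3, 2, 3],
                  [2, 1, 2],
                  [3, 2, 3]])
print(local_minima(_demo))  # True only at the center element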

data_scaled = data  #[0::5,0::5]
#data_min = data_scaled
#mask_min = local_minima(data_scaled)
#data_scaled[~mask_min] = np.NAN

#remove small holes by repeated grayscale closing
maxit = 3
data_open = data_scaled
for it in range(0, maxit):
    data_open = ndimage.grey_closing(data_open, size=(3, 3))
print "small holes filled"
#data_open = ndimage.grey_opening(data_scaled,size=(3,3))
data_open = filters.rank_filter(data_open,3,3)
#data_open = filters.gaussian_filter(data_open,sigma=5/2.0)
#data_open[np.isnan(data_open)]=-9999

#zfy = float(data.shape[0])/data_open.shape[0]
#zfx = float(data.shape[1])/data_open.shape[1]

#data_out = scipy.misc.imresize(data_open,(data.shape[0],data.shape[1]),interp='bilinear').astype(np.float_)
#data_out=ndimage.interpolation.zoom(data_open,(zfy,zfx))

#local_min v2
#set bad_matches to nan
#bad_matches=energy>0.7
image_label_overlay = label2rgb(labeled_particles, bg_label=0)
# clear_border(filtered_image)

new_image = image.copy()
new_image[labeled_particles == 0] = 0
# distance = ndi.distance_transform_edt(sobel(new_image))
distance = ndi.distance_transform_edt(filtered_image)
# distance[distance > 0.7*np.max(distance)] = 0.7*np.max(distance)
local_maxi = peak_local_max(distance, indices=False, footprint=morphology.square(5),
                            labels=filtered_image, exclude_border=False)
markers = ndi.label(local_maxi)[0]
# plt.imshow(distance, cmap=plt.cm.gray, interpolation="nearest")
# plt.show()

# labels = morphology.watershed(-distance, markers, mask=filtered_image)
closed_elevation = ndi.grey_closing(sobel(new_image), size=4)  # smooth the sobel elevation map by grayscale closing
labels = morphology.watershed(-distance, markers)
image_label_overlay3 = label2rgb(labels, bg_label=0)
image_label_overlay3[find_boundaries(labels)] = [0,0,0]
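
# self-contained sketch of the marker-controlled watershed pipeline used above (distance
# transform -> peak markers -> watershed); the toy data is hypothetical, and the older-style
# skimage calls (indices=False, morphology.watershed) are kept to match the code above.
import numpy as np
from scipy import ndimage as ndi
from skimage import morphology
from skimage.feature import peak_local_max

_blobs = np.zeros((80, 80), dtype=bool)
_blobs[15:45, 15:45] = True
_blobs[35:70, 35:70] = True  # two overlapping squares to be split apart
_dist = ndi.distance_transform_edt(_blobs)
_peaks = peak_local_max(_dist, indices=False, footprint=morphology.square(5),
                        labels=_blobs.astype(int), exclude_border=False)
_markers = ndi.label(_peaks)[0]
_seg = morphology.watershed(-_dist, _markers, mask=_blobs)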

# filter truth image
truth2 = np.zeros_like(truth)
truth2[truth < 100] = 1
truth2[truth > 100] = 0

# label truth features
labeled_particles2, num_features2 = ndi.label(truth2)
image_label_overlay2 = label2rgb(labeled_particles2, bg_label=0)

# set up plot window
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(30, 10), sharex=True, sharey=True)