Example 1
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
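The test above relies on scipy.ndimage filters accepting either a single boundary mode or one mode per axis. A minimal sketch of the distinction (hedged: same imports as the test, small toy array):

import numpy as np
from scipy import ndimage as sndi

arr = np.array([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 0.]])

# A repeated per-axis mode is equivalent to passing the single mode once...
assert np.allclose(sndi.gaussian_filter(arr, 1, mode=['nearest', 'nearest']),
                   sndi.gaussian_filter(arr, 1, mode='nearest'))
# ...but genuinely different per-axis modes generally change the result.
mixed = sndi.gaussian_filter(arr, 1, mode=['reflect', 'wrap'])
print(np.allclose(mixed, sndi.gaussian_filter(arr, 1, mode='reflect')))  # typically False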
Example 2
def mexF2d(a, sigma2d):
    """
    calculate -nd.gaussian_laplace sectionwise, using
    output=None, mode="reflect", cval=0.0

    forcing float32 output
    """
    from scipy.ndimage import gaussian_laplace
    out = zeroArrF(a.shape)

    for tup in N.ndindex(a.shape[:-2]):
        gaussian_laplace(a[tup], sigma2d, output=out[tup], mode="reflect", cval=0.0)
        out[tup] *= -1

    return out
Example 3
def wall_points_phc_1(img, refsx, sigma, extra):
    """Get 4 reference points with coordinates on pipette walls
    for Phase Contrast images
    (walls are inner inflection points of inner minima on the profile)
    """
    extra_walls = []
    refs = np.array([])
    refs_err = np.array([])
    for refx in refsx:
        if extra:
            extra_wall = {}
        jumps = jumps_pos_steep(ndimage.gaussian_laplace(img[:, refx], sigma).astype(float), 8)
        y = img[:, refx]

        fit = find_peak(y, jumps[1] + np.argmin(y[jumps[1] : jumps[2]]))
        a, b, x0, s = fit[0]
        refs = np.append(refs, np.asarray((x0 + s, refx)), axis=1)
        # taking err_x0+err_s while ref = x0+s
        refs_err = np.append(refs_err, sum(sqrt(np.diag(fit[1])[2:])))
        if extra:
            extra_wall["fit1"] = fit[0]

        fit = find_peak(y, jumps[5] + np.argmin(y[jumps[5] : jumps[6]]))
        a, b, x0, s = fit[0]
        refs = np.append(refs, np.asarray((x0 - s, refx)), axis=1)
        refs_err = np.append(refs_err, sum(sqrt(np.diag(fit[1])[2:])))
        if extra:
            extra_wall["fit2"] = fit[0]
            extra_wall["profile"] = y
            extra_walls.append(extra_wall)
    return refs.reshape(4, 2), refs_err, extra_walls
Example 4
 def remove_dust(self):
     self.seq = []
     for i in xrange(0,self.n_frames):
         # greyscale conversion
         img = np.average(self.reader.get_data(i),axis=2)
         self.seq.append(img)
     self.seq = np.array(self.seq)
     #var = np.var(self.seq, axis=0)
     #min = np.min(self.seq, axis=0)
     #max = np.max(self.seq, axis=0)
     #delta = max - min
     #var = stats.variation(self.seq, axis=0)
     #gmean = stats.gmean(self.seq, axis=0)
     a = np.average(self.seq, axis=0)
     #grad = ndimage.gaussian_gradient_magnitude(a , 0.25)
     #map = ndimage.prewitt(a)
     map = ndimage.gaussian_laplace(a,2.5) * ndimage.gaussian_gradient_magnitude(a , 0.25)
     cutoff = np.percentile(map,99.9)
     map[map<cutoff]=0
     map[map>0]=1
     #map = grad
     #map[map>300]=300
     fig = plt.figure(figsize=(20,8), frameon=False)
     fig.subplots_adjust(hspace=0)
     fig.subplots_adjust(wspace=0)
     ax1 = fig.add_subplot(1, 2, 1)
     ax1.imshow(map,interpolation='nearest')
     ax1.set_title('variance')
     ax2 = fig.add_subplot(1, 2, 2)
     ax2.imshow(self.seq[0], cmap='Greys_r',interpolation='nearest')
     ax2.set_title('img')
     fig.set_tight_layout(True)
     plt.show()
Example 5
def texton_visualization_filters(resolution, sigma, scales):
    impulse = np.zeros((resolution,resolution))
    impulse[resolution // 2 + 1][resolution // 2 + 1] = 1

    filters = []
    filters.append(gaussian_filter(impulse, sigma))
    filters.append(gaussian_laplace(impulse, sigma))
    for _ord in [1,2]:
        for scale in scales:
            filters.append(gaussian_filter(impulse, scale, order=[_ord,0]))

    return filters
Example 6
def sharpen_2pimage(image, laplace_sigma=0.7, low_percentile=3, high_percentile=99.9):
    """ Apply a laplacian filter, clip pixel range and normalize.

    :param np.array image: Array with raw two-photon images.
    :param float laplace_sigma: Sigma of the gaussian used in the laplace filter.
    :param float low_percentile, high_percentile: Percentiles at which to clip.

    :returns: Array of same shape as input. Sharpened image.
    """
    sharpened = image - ndimage.gaussian_laplace(image, laplace_sigma)
    clipped = np.clip(sharpened, *np.percentile(sharpened, [low_percentile, high_percentile]))
    norm = (clipped - clipped.mean()) / (clipped.max() - clipped.min() + 1e-7)
    return norm
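A short usage sketch for the helper above (hedged: the random array only stands in for a raw two-photon frame; numpy and scipy.ndimage are imported as the snippet implies):

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
frame = rng.random((128, 128)).astype(np.float32)  # stand-in for a raw frame
out = sharpen_2pimage(frame)
print(out.shape)                 # same shape as the input
print(out.min(), out.max())      # zero-mean values spanning less than one unit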
Example 7
def test_multiple_modes_gaussian_laplace():
    # Test gaussian_laplace filter for multiple extrapolation modes
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
                         [-0.36630503, -0.20069774, 0.07483620],
                         [0.15849176, 0.18495566, 0.21934094]])

    modes = ['reflect', 'wrap']

    assert_almost_equal(expected,
                        sndi.gaussian_laplace(arr, 1, mode=modes))
Example 8
def generate_filters(sigma, scales, angles):
    """

    Parameters
    ----------
    sigma: scalar
    The scale of the Gaussian and the Laplacian of Gaussian filters.
    scales: list of tuples
    The scales of the anisotropic gaussian derivative filters.
    angles: list of scalars
    The angles of the anisotropic gaussian derivative filters.
    
    Returns
    ----------
    filters: list of functions
    A list of functions which are the filters. These functions
    take a single image as parameter and return a filter response.
    
    """
    filters = []
    
    # Gaussian filter
    gauss_filter = lambda I: gaussian_filter(I, sigma)
    filters.append(gauss_filter)
    
    # Laplacian of Gaussian filter
    laplacian_filter = lambda I: gaussian_laplace(I, sigma)
    filters.append(laplacian_filter)
    
    # Edge filters (first order Gaussian derivative)
    for scale in scales:
        filters.append(
          lambda I, scale=scale: \
          max_anisotropic_derivative_filter(I, scale, angles, 1))
        
    # Bar filters (second order Gaussian derivative)
    for scale in scales:
        filters.append(
          lambda I, scale=scale: \
          max_anisotropic_derivative_filter(I, scale, angles, 2))

    return filters
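A hedged usage sketch: the first two entries of the returned list only need scipy, while the edge/bar filters assume the project's max_anisotropic_derivative_filter helper (not shown here), so only the isotropic ones are invoked below.

import numpy as np
from scipy.ndimage import gaussian_filter, gaussian_laplace

impulse = np.zeros((64, 64))
impulse[32, 32] = 1.0

bank = generate_filters(sigma=2.0, scales=[(1.0, 3.0)], angles=[0.0, np.pi / 4])
smooth = bank[0](impulse)    # Gaussian response
log_resp = bank[1](impulse)  # Laplacian-of-Gaussian response
# bank[2:] call max_anisotropic_derivative_filter, which must be in scope elsewhere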
Example 9
    def update_features(self, img_array, index, use_memory):

        if use_memory and (index in self.feats_dictionary):
            self.integral_img, self.avg_rc, self.avg_gc, self.avg_bc, self.avg_rc_h, \
            self.avg_gc_h, self.avg_bc_h, self.gauss1rc, self.gauss1gc, self.gauss1bc,\
            self.gauss35rc,self.gauss35gc, self.gauss35bc, self.log2rc, self.log2gc, self.log2bc,\
            self.log35rc, self.log35gc, self.log35bc = self.feats_dictionary[index]
            return

        img_array = mirror_borders(img_array, self.patch_size // 2)

        # integral image
        self.integral_img = cv2.integral(img_array[:,:,0])

        # average image red and green channel patch size
        self.avg_rc = cv2.blur(img_array[:,:,0], (self.patch_size, self.patch_size))
        self.avg_gc = cv2.blur(img_array[:,:,1], (self.patch_size, self.patch_size))
        self.avg_bc = cv2.blur(img_array[:,:,2], (self.patch_size, self.patch_size))

        # average images all three channels
        self.avg_rc_h = cv2.blur(img_array[:,:,0], (self.patch_size//2, self.patch_size//2))
        self.avg_gc_h = cv2.blur(img_array[:,:,1], (self.patch_size//2, self.patch_size//2))
        self.avg_bc_h = cv2.blur(img_array[:,:,2], (self.patch_size//2, self.patch_size//2))

        # gaussian smoothed sigma 1
        self.gauss1rc = nd.gaussian_filter(img_array[:,:,0], 1)
        self.gauss1gc = nd.gaussian_filter(img_array[:,:,1], 1)
        self.gauss1bc = nd.gaussian_filter(img_array[:,:,2], 1)

        # gaussian smoothed sigma 3.5
        self.gauss35rc = nd.gaussian_filter(img_array[:, :, 0], 3.5)
        self.gauss35gc = nd.gaussian_filter(img_array[:, :, 1], 3.5)
        self.gauss35bc = nd.gaussian_filter(img_array[:, :, 2], 3.5)

        # laplace of gaussian sigma 2 (all three channels)
        self.log2rc = nd.gaussian_laplace(img_array[:,:,0], 2)
        self.log2gc = nd.gaussian_laplace(img_array[:,:,1], 2)
        self.log2bc = nd.gaussian_laplace(img_array[:,:,2], 2)

        # laplace of gaussian sigma 3.5
        self.log35rc = nd.gaussian_laplace(img_array[:,:,0], 3.5)
        self.log35gc = nd.gaussian_laplace(img_array[:,:,1], 3.5)
        self.log35bc = nd.gaussian_laplace(img_array[:,:,2], 3.5)

        if use_memory:
            # add the computed features to the dictionary
            self.feats_dictionary[index] = self.integral_img, self.avg_rc, self.avg_gc, self.avg_bc, self.avg_rc_h,\
                self.avg_gc_h, self.avg_bc_h, self.gauss1rc, self.gauss1gc, self.gauss1bc, self.gauss35rc, self.gauss35gc,\
                self.gauss35bc, self.log2rc, self.log2gc, self.log2bc, self.log35rc, self.log35gc, self.log35bc
Example 10
def test_gaussian_truncate():
    # Test that Gaussian filters can be truncated at different widths.
    # These tests only check that the result has the expected number
    # of nonzero elements.
    arr = np.zeros((100, 100), float)
    arr[50, 50] = 1
    num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
    assert_equal(num_nonzeros_2, 21**2)
    num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
    assert_equal(num_nonzeros_5, 51**2)

    # Test truncate when sigma is a sequence.
    f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
    fpos = f > 0
    n0 = fpos.any(axis=0).sum()
    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
    assert_equal(n0, 19)
    n1 = fpos.any(axis=1).sum()
    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
    assert_equal(n1, 5)

    # Test gaussian_filter1d.
    x = np.zeros(51)
    x[25] = 1
    f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
    n = (f > 0).sum()
    assert_equal(n, 15)

    # Test gaussian_laplace
    y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
    nonzero_indices = np.nonzero(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)

    # Test gaussian_gradient_magnitude
    y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
    nonzero_indices = np.nonzero(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)
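The widths asserted above follow from the kernel radius scipy.ndimage uses for its Gaussian-based filters, r = int(truncate * sigma + 0.5), which leaves 2*r + 1 nonzero samples per axis. A quick check of the numbers used in the test:

for sigma, truncate in [(5, 2), (5, 5), (2.5, 3.5), (0.5, 3.5), (2, 3.5)]:
    r = int(truncate * sigma + 0.5)
    print(sigma, truncate, 2 * r + 1)   # 21, 51, 19, 5, 15, as asserted above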
Example 11
def gaussian_Laplace1(left):
    img = []
    for i in range(left.shape[0]):
        img.append(ndimage.gaussian_laplace(left[i, :, :], sigma=1))
    return np.asarray(img)
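Hedged usage sketch: the helper applies a 2-D LoG to each slice along the first axis of a stack; the random volume below is only illustrative.

import numpy as np
from scipy import ndimage

stack = np.random.rand(4, 32, 32)     # e.g. a small image stack
filtered = gaussian_Laplace1(stack)   # slice-wise 2-D LoG with sigma=1
print(filtered.shape)                 # (4, 32, 32)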
Example 12
def findLogZeros(image, logKernelSize, gradKernelSize, gradstrength,
                 gradmethod='any'):
    ''' improved implementation of find log zeros.
    use np.diff of np.signbit to detect sign changes.
    about 20 times faster than a loop like:
        for y in range(ySize):
            for x in range(xSize):

    Also, keep track of positive gradient and negative gradient, and shift
    appropriately so all edges are on the negative side of a zero contour,
    rather than always appearing below/right of a zero, as is default behavior
    for np.diff and the old for-loop logZeros method.

    to find all zeros, set gradstrength to 0.

    gradmethod:
        'any' returns all edges where any pixel is greater than gradstrength
        'mean' returns all edges where the average gradient is greater than
            gradsthrength. 'mean' may be computationally quite taxing.

    to find typical cell edges from a brightfield image,
    logKernelSize = 4
    gradKernelSize = 2
    gradstrength = 0.05
    gradmethod = 'any'

    to find typical fluorescence features,
    logKernelSize = 2.5
    gradKernelSize = 2.5
    gradstrength = 0.05
    gradmethod = 'mean'
    '''
    # find raw zeros of the laplace of gaussian convolution of the input image
    scaledImage = ((image.astype('float64') - image.min()) /
                   (image.max() - image.min()))
    #laplace of gaussian
    log = ndimage.gaussian_laplace(scaledImage, logKernelSize)
    # initialize for zeros of laplace of gaussian
    logZeros = np.zeros(log.shape, bool)
    xZerosRaw = np.diff(np.signbit(log).astype(int),axis=1) # row zeros
    yZerosRaw = np.diff(np.signbit(log).astype(int),axis=0) # column zeros
    # find the indices for left, right, top and bottom edges
    leftZerosIdx = np.where(xZerosRaw==1)
    rightZerosIdx = np.where(xZerosRaw==-1)
    topZerosIdx = np.where(yZerosRaw==1)
    bottomZerosIdx = np.where(yZerosRaw==-1)
    # left and top zeros must be shifted by one column/row respectively
    logZeros[:,1:][leftZerosIdx] = True
    logZeros[1:,:][topZerosIdx] = True
    # right and bottom zeros can be added directly
    logZeros[rightZerosIdx] = True
    logZeros[bottomZerosIdx] = True
    # filter by gradient, treating connected pixels as a single edge, and
    # discarding any edge as specified by gradmethod
    grad = ndimage.gaussian_gradient_magnitude(scaledImage, gradKernelSize)
    lbl_logZeros, nEdges = ndimage.label(logZeros,
                                         [[1, 1, 1],
                                          [1, 1, 1],
                                          [1, 1, 1]])
    if gradmethod == 'mean':
        for edge in range(nEdges):
            if grad[lbl_logZeros==edge].mean() < gradstrength:
                logZeros[lbl_logZeros==edge] = 0
    elif gradmethod == 'any':
        logZeros = np.zeros(log.shape, bool)
        cutoffEdgeLabels = list(set(lbl_logZeros[grad > gradstrength]))[1:]
        for edge in cutoffEdgeLabels:
            logZeros[lbl_logZeros==edge] = 1
    return logZeros
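The zero-crossing trick described in the docstring, shown in isolation (a small hedged sketch):

import numpy as np

v = np.array([3.0, 1.0, -2.0, -1.0, 4.0])
changes = np.diff(np.signbit(v).astype(int))
print(changes)   # [ 0  1  0 -1]: +1 marks a positive-to-negative step, -1 the reverse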
Example 13
def road_transform(gray): 
	dist = cv2.distanceTransform(gray, cv2.DIST_L2, 3)  # get distanceTransform map 
	dist_gauss = ndimage.gaussian_laplace(dist, sigma=3)  # apply gaussian_laplace filter 
	return dist, dist_gauss	
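A hedged usage sketch (assumes cv2, numpy, and scipy.ndimage are importable; the binary strip below only stands in for a road mask):

import cv2
import numpy as np
from scipy import ndimage

mask = np.zeros((64, 64), dtype=np.uint8)
mask[24:40, :] = 255                      # a horizontal "road" strip
dist, dist_log = road_transform(mask)     # distance map and its LoG response
print(dist.shape, dist_log.shape)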
Example 14
 def run(self, ips, snap, img, para = None):
     nimg.gaussian_laplace(snap, para['sigma'], output=img)
Example 15
def log_calc(image, sigma):
    return ndimage.gaussian_laplace(image, sigma)
Example 16
def blob_log(image,
             min_sigma=1,
             max_sigma=50,
             num_sigma=10,
             threshold=.2,
             overlap=.5,
             log_scale=False,
             *,
             exclude_border=False):
    r"""Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : scalar or sequence of scalars, optional
        the minimum standard deviation for Gaussian kernel. Keep this low to
        detect smaller blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    max_sigma : scalar or sequence of scalars, optional
        The maximum standard deviation for Gaussian kernel. Keep this high to
        detect larger blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional.
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `threshold`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.
    exclude_border : tuple of ints, int, or False, optional
        If tuple of ints, the length of the tuple must match the input array's
        dimensionality.  Each element of the tuple will exclude peaks from
        within `exclude_border`-pixels of the border of the image along that
        dimension.
        If nonzero int, `exclude_border` excludes peaks from within
        `exclude_border`-pixels of the border of the image.
        If zero or False, peaks are identified regardless of their
        distance from the border.

    Returns
    -------
    A : (n, image.ndim + sigma) ndarray
        A 2d array with each row representing 2 coordinate values for a 2D
        image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
        When a single sigma is passed, outputs are:
        ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
        ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
        deviation of the Gaussian kernel which detected the blob. When an
        anisotropic gaussian is used (sigmas per dimension), the detected sigma
        is returned for each dimension.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[124.        , 336.        ,  11.88888889],
           [198.        , 155.        ,  11.88888889],
           [194.        , 213.        ,  17.33333333],
           [121.        , 272.        ,  17.33333333],
           [263.        , 244.        ,  17.33333333],
           [194.        , 276.        ,  17.33333333],
           [266.        , 115.        ,  11.88888889],
           [128.        , 154.        ,  11.88888889],
           [260.        , 174.        ,  17.33333333],
           [198.        , 103.        ,  11.88888889],
           [126.        , 208.        ,  11.88888889],
           [127.        , 102.        ,  11.88888889],
           [263.        , 302.        ,  17.33333333],
           [197.        ,  44.        ,  11.88888889],
           [185.        , 344.        ,  17.33333333],
           [126.        ,  46.        ,  11.88888889],
           [113.        , 323.        ,   1.        ]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
    a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
    """
    image = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)

    # if both min and max sigma are scalar, function returns only one sigma
    scalar_sigma = (True if np.isscalar(max_sigma) and np.isscalar(min_sigma)
                    else False)

    # Gaussian filter requires that sequence-type sigmas have same
    # dimensionality as image. This broadcasts scalar kernels
    if np.isscalar(max_sigma):
        max_sigma = np.full(image.ndim, max_sigma, dtype=float_dtype)
    if np.isscalar(min_sigma):
        min_sigma = np.full(image.ndim, min_sigma, dtype=float_dtype)

    # Convert sequence types to array
    min_sigma = np.asarray(min_sigma, dtype=float_dtype)
    max_sigma = np.asarray(max_sigma, dtype=float_dtype)

    if log_scale:
        # for anisotropic data, we use the "highest resolution/variance" axis
        standard_axis = np.argmax(min_sigma)
        start = np.log10(min_sigma[standard_axis])
        stop = np.log10(max_sigma[standard_axis])
        scale = np.logspace(start, stop, num_sigma)[:, np.newaxis]
        sigma_list = scale * min_sigma / np.max(min_sigma)
    else:
        scale = np.linspace(0, 1, num_sigma)[:, np.newaxis]
        sigma_list = scale * (max_sigma - min_sigma) + min_sigma

    # computing gaussian laplace
    # average s**2 provides scale invariance
    gl_images = [
        -gaussian_laplace(image, s) * np.mean(s)**2 for s in sigma_list
    ]

    image_cube = np.stack(gl_images, axis=-1)

    exclude_border = _format_exclude_border(image.ndim, exclude_border)
    local_maxima = peak_local_max(
        image_cube,
        threshold_abs=threshold,
        footprint=np.ones((3, ) * (image.ndim + 1)),
        threshold_rel=0.0,
        exclude_border=exclude_border,
    )

    # Catch no peaks
    if local_maxima.size == 0:
        return np.empty((0, 3))

    # Convert local_maxima to float64
    lm = local_maxima.astype(float_dtype)

    # translate final column of lm, which contains the index of the
    # sigma that produced the maximum intensity value, into the sigma
    sigmas_of_peaks = sigma_list[local_maxima[:, -1]]

    if scalar_sigma:
        # select one sigma column, keeping dimension
        sigmas_of_peaks = sigmas_of_peaks[:, 0:1]

    # Remove sigma index and replace with sigmas
    lm = np.hstack([lm[:, :-1], sigmas_of_peaks])

    sigma_dim = sigmas_of_peaks.shape[1]

    return _prune_blobs(lm, overlap, sigma_dim=sigma_dim)
Example 17
File: log.py Project: e-koch/BaSiCs
def blob_log(image, sigma_list=None, scale_choice='linear',
             min_sigma=1, max_sigma=50, num_sigma=10,
             threshold=.2, overlap=.5, sigma_ratio=2.,
             weighting=None, merge_overlap_dist=1.0,
             refine_shape=False, use_max_response=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    sigma_list : np.ndarray, optional
        Provide the list of sigmas to use.
    scale_choice : str, optional
        'log', 'linear' or 'ratio'. Determines how the scales are calculated
        based on the given number, minimum, maximum, or ratio.
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional.
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `threshold`, the smaller blob is eliminated.
    weighting : np.ndarray, optional
        Used to weight certain scales differently when selecting local maxima
        in the transform space. For example when searching for regions near
        the beam size, the transform can be down-weighted to avoid spurious
        detections. Must have the same number of elements as the scales.
    merge_overlap_dist : float, optional
        Controls the minimum overlap regions must have to be merged together.
        Defaults to one sigma separation, where sigma is one of the scales
        used in the transform.

    Returns
    -------
    A : (n, 5) ndarray
        A 2d array with each row representing 5 values,
        ``(y, x, semi-major sigma, semi-minor sigma, pa)``
        where ``(y,x)`` are coordinates of the blob, ``semi-major sigma`` and
        ``semi-minor sigma`` are the standard deviations of the elliptical blob,
        and ``pa`` is its position angle.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 113.        ,  323.        ,    1.        ],
           [ 121.        ,  272.        ,   17.33333333],
           [ 124.        ,  336.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 128.        ,  154.        ,   11.88888889],
           [ 185.        ,  344.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 194.        ,  276.        ,   17.33333333],
           [ 197.        ,   44.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 198.        ,  155.        ,   11.88888889],
           [ 260.        ,  174.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 263.        ,  302.        ,   17.33333333],
           [ 266.        ,  115.        ,   11.88888889]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
    """

    # assert_nD(image, 2)

    image = img_as_float(image)

    if sigma_list is None:
        if scale_choice == 'log':
            start, stop = log(min_sigma, 10), log(max_sigma, 10)
            sigma_list = np.logspace(start, stop, num_sigma)
        elif scale_choice == 'linear':
            sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
        elif scale_choice == 'ratio':
            # k such that min_sigma*(sigma_ratio**k) > max_sigma
            k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
            sigma_list = np.array([min_sigma * (sigma_ratio ** i)
                                   for i in range(k)])
        else:
            raise ValueError("scale_choice must be 'log', 'linear', or "
                             "'ratio'.")

    if weighting is not None:
        if len(weighting) != len(sigma_list):
            raise IndexError("weighting must have the same number of elements"
                             " as scales (" + str(len(sigma_list)) + ").")
    else:
        weighting = np.ones_like(sigma_list)

    # computing gaussian laplace
    # s**2 provides scale invariance
    # weighting by w changes the relative importance of each transform scale
    gl_images = [gaussian_laplace(image, s) * s ** 2 * w for s, w in
                 zip(sigma_list, weighting)]
    image_cube = np.dstack(gl_images)

    if use_max_response:
        scale_peaks = \
            peak_local_max(image_cube.max(2),
                           threshold_abs=threshold,
                           threshold_rel=0.0,
                           min_distance=0.5 * np.sqrt(2) * sigma_list[0],
                           exclude_border=False)

        argmaxes = image_cube.argmax(2)[scale_peaks[:, 0], scale_peaks[:, 1]]
        radii = np.array([sigma_list[arg] for arg in argmaxes]) * np.sqrt(2)
        radii = radii[:, np.newaxis]

        responses = image_cube.max(2)[scale_peaks[:, 0], scale_peaks[:, 1]]
        responses = responses[:, np.newaxis]

        pas = np.zeros_like(radii)
        local_maxima = np.hstack([scale_peaks, radii, radii, pas, responses])
    else:
        for i, scale in enumerate(sigma_list):
            scale_peaks = peak_local_max(image_cube[:, :, i],
                                         threshold_abs=threshold,
                                         min_distance=np.sqrt(2) * scale,
                                         threshold_rel=0.0,
                                         exclude_border=False)

            new_scale_peaks = np.empty((len(scale_peaks), 5))
            if refine_shape:
                for j, peak in enumerate(scale_peaks):
                    new_peak = np.array([peak[0], peak[1], scale, scale, 0.0])
                    new_scale_peaks[j] = \
                        shape_from_blob_moments(new_peak, image_cube[:, :, i])
            else:
                new_scale_peaks[:, :2] = scale_peaks
                # sqrt(2) size correction
                new_scale_peaks[:, 2:4] = np.sqrt(2) * scale
                new_scale_peaks[:, 4] = 0.0
                vals = \
                    np.array([image_cube[pos[0], pos[1], i]
                              for pos in scale_peaks]).reshape((len(scale_peaks),
                                                                1))
                new_scale_peaks = np.hstack([new_scale_peaks, vals])

            if i == 0:
                local_maxima = new_scale_peaks
            else:
                local_maxima = np.vstack([local_maxima, new_scale_peaks])

    if local_maxima.size == 0:
        return local_maxima

    # Merge regions into ellipses
    local_maxima = _merge_blobs(local_maxima, merge_overlap_dist)

    # Then prune and return them\
    return local_maxima
Example 18
if 'load' in sys.argv:
    im = plt.imread('remote.jpeg')

target = {
    'c1': [620, 1200],  # [y1, x1]
    'c2': [620, 1830],  # [y1, x2]
    'c3': [1096, 1830],  # [y2, x2]
    'c4': [1096, 1200]
}  # [y2, x1]

c1 = target['c1']
c2 = target['c2']
c3 = target['c3']
c4 = target['c4']

sharpen = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]

# Create a Field of View
fov = im[c1[0]:c3[0], c1[1]:c3[1]]
fov = ndi.convolve(fov[:, :, 2], np.array(sharpen))

edges = 10 * fov / ndi.gaussian_laplace(im[c1[0]:c3[0], c1[1]:c3[1]][:, :, 2],
                                        sigma=2)
f, ax = plt.subplots(1, 2, sharex=True, sharey=True)
#ax[0].imshow(im[c1[0]:c3[0],c1[1]:c3[1]], 'gray_r')
#ax[1].imshow(edges,'gray_r')
#plt.show()

plt.imsave('fov.jpeg', edges)
os.remove('remote.jpeg')
Example 19
def log_filter(image, sigma, keep_dtype=False):
    """Apply a Laplacian of Gaussian filter to a 2-d or 3-d image.

    The function returns the inverse of the filtered image such that the pixels
    with the highest intensity from the original (smoothed) image have
    positive values. Those with a low intensity returning a negative value are
    clipped to zero.

    Parameters
    ----------
    image : np.ndarray
        Image with shape (z, y, x) or (y, x).
    sigma : float, int, Tuple(float, int) or List(float, int)
        Sigma used for the gaussian filter (one for each dimension). If it's a
        float, the same sigma is applied to every dimensions.
    keep_dtype : bool
        Cast output image as input image.

    Returns
    -------
    image_filtered : np.ndarray
        Filtered image.

    """
    # check parameters
    check_array(image,
                ndim=[2, 3],
                dtype=[np.uint8, np.uint16, np.float32, np.float64])
    check_parameter(sigma=(float, int, tuple, list))

    # we cast the data in np.float to allow negative values
    if image.dtype == np.uint8:
        image_float = cast_img_float32(image)
    elif image.dtype == np.uint16:
        image_float = cast_img_float64(image)
    else:
        image_float = image

    # check sigma
    if isinstance(sigma, (tuple, list)):
        if len(sigma) != image.ndim:
            raise ValueError("'sigma' must be a scalar or a sequence with the "
                             "same length as 'image.ndim'.")

    # we apply LoG filter
    image_filtered = gaussian_laplace(image_float, sigma=sigma)

    # as the LoG filter makes the peaks in the original image appear as a
    # reversed mexican hat, we inverse the result and clip negative values to 0
    image_filtered = np.clip(-image_filtered, a_min=0, a_max=None)

    # cast filtered image
    if keep_dtype:
        if image.dtype == np.uint8:
            image_filtered = cast_img_uint8(image_filtered)
        elif image.dtype == np.uint16:
            image_filtered = cast_img_uint16(image_filtered)
        else:
            pass

    return image_filtered
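The core of the filter, without the package-specific checks and casts, applied to a single bright spot (hedged sketch):

import numpy as np
from scipy.ndimage import gaussian_laplace

spot = np.zeros((21, 21), dtype=np.float32)
spot[10, 10] = 1.0
resp = np.clip(-gaussian_laplace(spot, sigma=2.0), a_min=0, a_max=None)
print(resp[10, 10] > 0)                              # True: the bright peak maps to a positive response
print(np.unravel_index(resp.argmax(), resp.shape))   # (10, 10): maximum at the original spot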
Example 20
def main(vid_id):

    print("process video: " + str(vid_id))

    # if you need to write the video, set the correct frame image path below
    IMAGE_PATH = '....../04_26_20/frames256_instances/{}/{}/image_{:05d}.jpg'
    write_video = False
    fps = 6

    # deal with index/position
    offset = 4  # end frame index (after downsampling) of the current segment (the 4th) within the first window
    downsample = 3
    offset *= downsample

    # hyper-para for boundary detection
    smooth_factor = 5
    LoG_sigma = 15

    # Update these
    exp_path = '../../data/exp_TAPOS'
    output_seg_dir = 'detect_seg'
    SAVE_PATH = exp_path + output_seg_dir + '/{}/frame_{{:05d}}.jpg'
    OUTPUT_PATH = exp_path + output_seg_dir + '/{}.mp4'
    OUTPUT_BDY_PATH = exp_path + output_seg_dir + '/{}.pkl'

    if not os.path.exists(exp_path + output_seg_dir):
        os.makedirs(exp_path + output_seg_dir)

    def detect_boundary(signal, offset, sigma):
        bdy_idx = []
        LoG_mean_errors = ndimage.gaussian_laplace(signal,
                                                   sigma=LoG_sigma,
                                                   mode='nearest')
        delta_LoG = np.gradient(LoG_mean_errors)
        for i in range(len(signal) - 1):
            if delta_LoG[i] >= 0 and delta_LoG[i + 1] <= 0:
                if delta_LoG[i] > -delta_LoG[i + 1]:
                    bdy_idx += [i + offset + 1]
                else:
                    bdy_idx += [i + offset]
        return bdy_idx

    # load GT boundary timestamps and convert into index here
    with open('../../data/export/tapos_annotation_timestamps_myfps.json',
              'r') as f:
        tapos_dict = json.load(f)

    # read and pre-process prediction error for each video
    windows = sorted(glob(exp_path + 'pred_err/err_vid_' + vid_id + '*.pkl'))
    if windows == []:
        return

    pred_err = []
    for window_path in windows:
        with open(window_path, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
            pred_err.append(data['pred_err'])
    mean_errors = -np.nanmean(np.stack(pred_err, axis=0), axis=(1, 2, 3, 4))

    save_path = SAVE_PATH.format(vid_id)
    output_path = OUTPUT_PATH.format(vid_id)
    output_bdy_path = OUTPUT_BDY_PATH.format(vid_id)

    if osp.exists(osp.dirname(save_path)):
        shutil.rmtree(osp.dirname(save_path))
    os.mkdir(osp.dirname(save_path))
    print(osp.dirname(save_path))

    if len(mean_errors) < 2:
        return

    bdy_idx_list = detect_boundary(mean_errors, offset=offset, sigma=LoG_sigma)
    bdy_idx_list_smt = detect_boundary(filters.gaussian_filter1d(
        mean_errors, smooth_factor),
                                       offset=offset,
                                       sigma=LoG_sigma)
    bdy_idx_save = {}
    bdy_idx_save['bdy_idx_list'] = bdy_idx_list
    bdy_idx_save['bdy_idx_list_smt'] = bdy_idx_list_smt
    pickle.dump(bdy_idx_save, open(output_bdy_path, "wb"))

    if write_video is False:
        return

    writer_og = VideoWriter('{img_dir}/{vid_id}_og.mp4'.format(
        img_dir=osp.dirname(osp.dirname(SAVE_PATH)), vid_id=vid_id),
                            fps=fps)

    # e.g. vid 2IO8DO_QbRE_s00018_5_324_13_160
    v = vid_id[:11]
    s = vid_id[12:]
    myfps = tapos_dict[v][s]['myfps']
    instance_start_idx = tapos_dict[v][s]['substages_myframeidx'][0]
    bdy_idx_list_gt = []
    for idx in tapos_dict[v][s]['substages_myframeidx'][1:-1]:
        bdy_idx_list_gt.append(int((idx - instance_start_idx) / downsample))

    for j in tqdm.tqdm(range(0, data['vlen'], downsample)):
        x = np.arange(offset, offset + len(mean_errors))
        fig = plt.figure(figsize=(10, 8))
        gs = gridspec.GridSpec(nrows=5, ncols=2)
        ax1 = fig.add_subplot(gs[0, 0])

        # Signal is in mean_errors.
        ax1.plot(x,
                 filters.gaussian_filter1d(mean_errors, smooth_factor),
                 lw=2)
        ax1.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax1.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list_smt:  # plot our detected boundaries
            ax1.axvline(bdy_idx, color='green', ls='--')
        ax1.set_xlim(0, len(mean_errors) + offset + offset_end)
        ax1.set_title('Smooth Pred Acc')

        ax2 = fig.add_subplot(gs[0, 1])
        ax2.plot(x, mean_errors, lw=2)
        ax2.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax2.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list:
            ax2.axvline(bdy_idx, color='green', ls='--')
        ax2.set_title('Pred Acc')
        ax2.set_xlim(0, len(mean_errors) + offset + offset_end)

        ax4 = fig.add_subplot(gs[1, 0])
        LoG_smt_mean_errors = ndimage.gaussian_laplace(
            filters.gaussian_filter1d(mean_errors, smooth_factor),
            sigma=LoG_sigma,
            mode='nearest')
        ax4.plot(x, LoG_smt_mean_errors, lw=2)
        ax4.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax4.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list_smt:
            ax4.axvline(bdy_idx, color='green', ls='--')
        ax4.set_title('LoG Smooth Pred Acc')
        ax4.set_xlim(0, len(mean_errors) + offset + offset_end)

        ax5 = fig.add_subplot(gs[1, 1])
        LoG_mean_errors = ndimage.gaussian_laplace(mean_errors,
                                                   sigma=LoG_sigma,
                                                   mode='nearest')
        ax5.plot(x, LoG_mean_errors, lw=2)
        ax5.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax5.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list:
            ax5.axvline(bdy_idx, color='green', ls='--')
        ax5.set_title('LoG Pred Acc')
        ax5.set_xlim(0, len(mean_errors) + offset + offset_end)

        ax6 = fig.add_subplot(gs[2, 0])
        ax6.plot(x, np.gradient(LoG_smt_mean_errors), lw=2)
        ax6.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax6.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list_smt:
            ax6.axvline(bdy_idx, color='green', ls='--')
        ax6.axhline(0, color='green', ls='--')
        ax6.set_title('$\Delta$ LoG Smooth Pred Acc')
        ax6.set_xlim(0, len(mean_errors) + offset + offset_end)

        ax7 = fig.add_subplot(gs[2, 1])
        ax7.plot(x, np.gradient(LoG_mean_errors), lw=2)
        ax7.axvline(j / downsample, color='red')
        for bdy_idx in bdy_idx_list_gt:  # plot GT boundaries
            ax7.axvline(bdy_idx, color='cyan', ls='-')
        for bdy_idx in bdy_idx_list:
            ax7.axvline(bdy_idx, color='green', ls='--')
        ax7.axhline(0, color='green', ls='--')
        ax7.set_title('$\Delta$ LoG Pred Acc')
        ax7.set_xlim(0, len(mean_errors) + offset + offset_end)

        ax3 = fig.add_subplot(gs[-2:, :])
        im = plt.imread(IMAGE_PATH.format(v, s, (j + 1)))
        cv2.putText(im, str(j), (20, 20), 0, 1, (57, 255, 20), thickness=2)
        writer_og.add_image(im)
        ax3.imshow(im)
        ax3.axis('off')

        plt.tight_layout()
        plt.savefig(fname=save_path.format(int(j / downsample)), dpi=150)
        plt.close()
    cmd = [
        'ffmpeg',
        '-y',
        '-threads',
        '16',
        '-framerate',
        str(fps),
        '-i',
        '{img_dir}/frame_%05d.jpg'.format(img_dir=osp.dirname(save_path)),
        '-profile:v',
        'baseline',
        '-level',
        '3.0',
        '-c:v',
        'libx264',
        '-pix_fmt',
        'yuv420p',
        '-an',
        # Note that if called as a string, ffmpeg needs quotes around the
        # scale invocation.
        '-vf',
        'scale=trunc(iw/2)*2:trunc(ih/2)*2',
        output_path,
    ]
    print(' '.join(cmd))
    try:
        err = subprocess.call(cmd)
        if err:
            print('Subprocess failed')
    except OSError:
        print('Failed to run ffmpeg')
    writer_og.make_video()
    writer_og.close()
Example 21
plt.plot(y, x, 'or', ms=4)

from skimage import morphology
markers = np.zeros(im_denoised.shape, dtype=int)
markers[x.astype(int), y.astype(int)] = np.arange(len(x)) + 1
markers = morphology.dilation(markers, morphology.disk(7))

from scipy import ndimage
from skimage import morphology
# Black tophat transformation (see https://en.wikipedia.org/wiki/Top-hat_transform)
hat = ndimage.black_tophat(im_denoised, 7)
# Combine with denoised image
hat -= 0.3 * im_denoised
# Morphological dilation to try to remove some holes in hat image
hat = morphology.dilation(hat)
plt.imshow(hat, cmap='spectral')

labels_hat = morphology.watershed(hat, markers)
from skimage import color
color_labels = color.label2rgb(labels_hat, im_denoised)
plt.imshow(color_labels[:300, :300])

# A different markers image: laplace filter
lap = ndimage.gaussian_laplace(im_denoised, sigma=0.7)
lap = restoration.nl_means_denoising(lap, h=0.002)
plt.imshow(lap, cmap='spectral')
plt.colorbar()

labels_lap = morphology.watershed(lap, markers)
color_labels = color.label2rgb(labels_lap, im_denoised)
plt.imshow(color_labels)
"""
Example 23
for i in [1, 2, 3]:
    if i == 1:

        k = np.ones([5, 3])
        k = k / k.sum()
        title = "ndimage.convolve 5x3"
        print(title)
        smoothed = ndimage.convolve(data_2D, k, mode='nearest')

    if i == 2:

        if False:
            title = "gaussian_laplace"
            print("title")
            #smoothed = data_2D + ndimage.gaussian_laplace(data_2D, 3, mode='nearest') * 10.
            smoothed = ndimage.gaussian_laplace(data_2D, 3,
                                                mode='nearest') * 10.

        if True:
            #sigma = 1/2. * np.array([4.5, 3.0])  # no more artefacts for shifted fields
            sigma = 1/3. * np.array([4.5, 3.0])  # preserves the maxima a bit better
            title = "gaussian_filter " + str(sigma)
            print("... image 2: ", title)
            smoothed = ndimage.filters.gaussian_filter(data_2D,
                                                       sigma,
                                                       mode='nearest')

    if i == 3:

        if True:
            sigma = 1 / 2. * np.array(
Example 24

a = Image.open('resize.jpg').convert('L')
a = mi.fromimage(a)

thresh = filters.threshold_otsu(a)
im_otsu = a > thresh
im_otsu = mi.toimage(im_otsu)
im_otsu.save('otsu_semoutput.png')

im_canny = feature.canny(a, sigma=3)
fill_holes = nd.binary_fill_holes(im_canny)
fill_holes = mi.toimage(fill_holes)
im_canny = mi.toimage(im_canny)
im_canny.save('test_canny.jpg')

#to remove bushes try to adopt canny edges for leaves, fill holes and then negate obtained mask
fill_holes.save('fill_holes.jpg')

im_laplace = nd.gaussian_laplace(a, 3)
im_laplace = mi.toimage(im_laplace)
im_laplace.save('test_laplace.jpg')

im_elevation_map = filters.sobel(a)
markers = np.zeros_like(a)
markers[a < 30] = 1
markers[a > 150] = 2
segmentation = watershed(im_elevation_map, markers)
im_sobel = mi.toimage(segmentation)
im_sobel.save('im_sobel.jpg')
Example 25
    def addCell(self, eventTuple):
        if self.maskOn:
            if self.data.ndim == 2:
                self.aveData = self.data.copy()
            else:
                self.aveData = self.data.mean(axis=2)

            x, y = eventTuple
            localValue = self.currentMask[x, y]
            print str(self.mode) + " " + "x: " + str(x) + ", y: " + str(y) + ", mask val: " + str(localValue)

            # ensure mask is uint16
            self.currentMask = self.currentMask.astype("uint16")

            sys.stdout.flush()

            ########## NORMAL MODE
            if self.mode is None:
                if localValue > 0 and localValue != self.currentMaskNumber:
                    print "we are altering mask at at %d, %d" % (x, y)

                    # copy the old mask
                    newMask = self.currentMask.copy()

                    # make a labeled image of the current mask
                    labeledCurrentMask = mahotas.label(newMask)[0]
                    roiNumber = labeledCurrentMask[x, y]

                    # set that ROI to zero
                    newMask[labeledCurrentMask == roiNumber] = self.currentMaskNumber
                    newMask = newMask.astype("uint16")

                    self.listOfMasks.append(newMask)
                    self.currentMask = self.listOfMasks[-1]
                elif localValue > 0 and self.data.ndim == 3:
                    # update info panel
                    labeledCurrentMask = mahotas.label(self.currentMask.copy())[0]
                    roiNumber = labeledCurrentMask[x, y]
                    self.updateInfoPanel(ROI_number=roiNumber)

                elif localValue == 0:

                    xmin = int(x - self.diskSize)
                    xmax = int(x + self.diskSize)
                    ymin = int(y - self.diskSize)
                    ymax = int(y + self.diskSize)

                    sub_region_image = self.aveData[xmin:xmax, ymin:ymax].copy()
                    # threshold = mahotas.otsu(self.data[xmin:xmax, ymin:ymax].astype('uint16'))

                    # do a gaussian_laplacian filter to find the edges and the center

                    g_l = nd.gaussian_laplace(
                        sub_region_image, 1
                    )  # second argument is a free parameter, std of gaussian
                    g_l = mahotas.dilate(mahotas.erode(g_l >= 0))
                    g_l = mahotas.label(g_l)[0]
                    center = g_l == g_l[g_l.shape[0] / 2, g_l.shape[0] / 2]
                    # edges = mahotas.dilate(mahotas.dilate(mahotas.dilate(center))) - center

                    newCell = np.zeros_like(self.currentMask)
                    newCell[xmin:xmax, ymin:ymax] = center
                    newCell = mahotas.dilate(newCell)

                    if self.useNMF:
                        modes, thresh_modes, fit_data, this_cell, is_cell, nmf_limits = self.doLocalNMF(x, y, newCell)

                        for mode, mode_thresh, t, i in zip(modes, thresh_modes, this_cell, is_cell):
                            # need to place it in the right place
                            # have x and y
                            mode_width, mode_height = mode_thresh.shape
                            mode_thresh_fullsize = np.zeros_like(newCell)
                            mode_thresh_fullsize[
                                nmf_limits[0] : nmf_limits[1], nmf_limits[2] : nmf_limits[3]
                            ] = mode_thresh

                            # need to add all modes belonging to this cell first,
                            # then remove the ones nearby.

                            if i:
                                if t:
                                    valid_area = np.logical_and(
                                        mahotas.dilate(
                                            mahotas.dilate(mahotas.dilate(mahotas.dilate(newCell.astype(bool))))
                                        ),
                                        mode_thresh_fullsize,
                                    )
                                    newCell = np.logical_or(newCell.astype(bool), valid_area)
                                else:
                                    newCell = np.logical_and(
                                        newCell.astype(bool), np.logical_not(mahotas.dilate(mode_thresh_fullsize))
                                    )

                        newCell = mahotas.close_holes(newCell.astype(bool))
                        self.excludePixels(newCell, 2)

                    newCell = newCell.astype(self.currentMask.dtype)

                    # remove all pixels in and near current mask and filter for ROI size
                    newCell[mahotas.dilate(self.currentMask > 0)] = 0
                    newCell = self.excludePixels(newCell, 10)

                    newMask = (newCell * self.currentMaskNumber) + self.currentMask
                    newMask = newMask.astype("uint16")

                    self.listOfMasks.append(newMask.copy())
                    self.currentMask = newMask.copy()

            elif self.mode is "OGB":
                # build structuring elements
                se = pymorph.sebox()
                se2 = pymorph.sedisk(self.cellRadius, metric="city-block")
                seJunk = pymorph.sedisk(max(np.floor(self.cellRadius / 4.0), 1), metric="city-block")
                seExpand = pymorph.sedisk(self.diskSize, metric="city-block")

                # add a disk around selected point, non-overlapping with adjacent cells
                dilatedOrignal = mahotas.dilate(self.currentMask.astype(bool), Bc=se)
                safeUnselected = np.logical_not(dilatedOrignal)

                # tempMask is
                tempMask = np.zeros_like(self.currentMask, dtype=bool)
                tempMask[x, y] = True
                tempMask = mahotas.dilate(tempMask, Bc=se2)
                tempMask = np.logical_and(tempMask, safeUnselected)

                # calculate the area we should add to this disk based on % of a threshold
                cellMean = self.aveData[tempMask == 1.0].mean()
                allMeanBw = self.aveData >= (cellMean * float(self.contrastThreshold))

                tempLabel = mahotas.label(np.logical_and(allMeanBw, safeUnselected).astype(np.uint16))[0]
                connMeanBw = tempLabel == tempLabel[x, y]

                connMeanBw = np.logical_and(np.logical_or(connMeanBw, tempMask), safeUnselected).astype(np.bool)
                # erode and then dilate to remove sharp bits and edges

                erodedMean = mahotas.erode(connMeanBw, Bc=seJunk)
                dilateMean = mahotas.dilate(erodedMean, Bc=seJunk)
                dilateMean = mahotas.dilate(dilateMean, Bc=seExpand)

                modes, thresh_modes, fit_data, this_cell, is_cell, limits = self.doLocaNMF(x, y)

                newCell = np.logical_and(dilateMean, safeUnselected)
                newMask = (newCell * self.currentMaskNumber) + self.currentMask
                newMask = newMask.astype("uint16")

                self.listOfMasks.append(newMask.copy())
                self.currentMask = newMask.copy()

            ########## SQUARE MODE
            elif self.mode is "square":
                self.modeData.append((x, y))
                if len(self.modeData) == 2:
                    square_mask = np.zeros_like(self.currentMask)
                    xstart = self.modeData[0][0]
                    ystart = self.modeData[0][1]

                    xend = self.modeData[1][0]
                    yend = self.modeData[1][1]

                    square_mask[xstart:xend, ystart:yend] = 1

                    # check if square_mask interfers with current mask, if so, abort
                    if np.any(np.logical_and(square_mask, self.currentMask)):
                        return None

                    # add square_mask to mask
                    newMask = (square_mask * self.currentMaskNumber) + self.currentMask
                    newMask = newMask.astype("uint16")

                    self.listOfMasks.append(newMask)
                    self.currentMask = self.listOfMasks[-1]

                    # clear current mode data
                    self.clearModeData()

            ########## CIRCLE MODE
            elif self.mode is "circle":
                # make a strel and move it in place to make circle_mask
                if self.diskSize < 1:
                    return None

                if self.diskSize is 1:
                    se = np.ones((1, 1))
                elif self.diskSize is 2:
                    se = pymorph.secross(r=1)
                else:
                    se = pymorph.sedisk(r=(self.diskSize - 1))

                se_extent = int(se.shape[0] / 2)
                circle_mask = np.zeros_like(self.currentMask)
                circle_mask[x - se_extent : x + se_extent + 1, y - se_extent : y + se_extent + 1] = se * 1.0
                circle_mask = circle_mask.astype(bool)

                # check if circle_mask interfers with current mask, if so, abort
                if np.any(np.logical_and(circle_mask, mahotas.dilate(self.currentMask.astype(bool)))):
                    return None

                # add circle_mask to mask
                newMask = (circle_mask * self.currentMaskNumber) + self.currentMask
                newMask = newMask.astype("uint16")

                self.listOfMasks.append(newMask)
                self.currentMask = self.listOfMasks[-1]

            ########## POLY MODE
            elif self.mode is "poly":
                self.modeData.append((x, y))

            sys.stdout.flush()
            self.makeNewMaskAndBackgroundImage()
Example 26
 def __init__(self, image=[[0.0]], sigma=1, **options):
     from scipy import ndimage
     self.ax = ndimage.gaussian_laplace(image, sigma=sigma, **options)
Example 27
# axes[2].imshow(img2, cmap='gray')
# axes[2].set_title('blur via Gaussian filter')
# plt.show()
'''
Lab task 2: following the Gaussian filtering example above, try writing your own comparison between the ndimage.gaussian_laplace filter and explicit convolution.
'''
from scipy import fftpack

file_name = 'D:\Desktop/canoe.tif'
img = cv2.imread(file_name, 0)
img = (img - np.min(img)) / (np.max(img) - np.min(img))
fig, axes = plt.subplots(1, 3)
axes[0].imshow(img, cmap='gray')
axes[0].set_title('source')

img_cv = ndimage.gaussian_laplace(img, sigma=2)
img_cv = (img_cv - np.min(img_cv)) / (np.max(img_cv) - np.min(img_cv))
axes[1].imshow(img_cv, cmap='gray')
axes[1].set_title('convolve gaussian kernel')

# get the LoG kernel as the impulse response of gaussian_laplace
w = 100
h = 100

in_mask = np.zeros((h, w), dtype=np.float32)
in_mask[int(h / 2), int(w / 2)] = 1
img_kernel = ndimage.gaussian_laplace(in_mask, sigma=2)
#
kernel = img_kernel[50 - 10:50 + 10, 50 - 10:50 + 10]
kernel_ft = fftpack.fft2(kernel, shape=img.shape[:2], axes=(0, 1))
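A hedged completion of the comparison the comment asks for, reusing the img, kernel_ft, axes, fftpack, np and plt names from the snippet above; note the FFT product is a circular convolution, so the result is rolled back by the kernel-centre offset before display, and it matches the direct filter only up to truncation and boundary effects.

img_ft = fftpack.fft2(img, axes=(0, 1))
img_conv = np.real(fftpack.ifft2(kernel_ft * img_ft))
# the 20x20 kernel's centre sits at (10, 10), so the circular convolution
# shifts the output by (10, 10); roll it back to line up with ndimage's result
img_conv = np.roll(img_conv, shift=(-10, -10), axis=(0, 1))
img_conv = (img_conv - np.min(img_conv)) / (np.max(img_conv) - np.min(img_conv))
axes[2].imshow(img_conv, cmap='gray')
axes[2].set_title('FFT convolution with LoG kernel')
plt.show()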
Example 28
from scipy import ndimage, misc
import matplotlib.pyplot as plt

ascent = misc.ascent()

fig = plt.figure()
plt.gray()  # show the filtered result in grayscale
ax1 = fig.add_subplot(121)  # left side
ax2 = fig.add_subplot(122)  # right side

result = ndimage.gaussian_laplace(ascent, sigma=1)
ax1.imshow(result)

result = ndimage.gaussian_laplace(ascent, sigma=3)
ax2.imshow(result)
plt.show()
Example 29
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, thresholds=[.2],
             overlap=.5, log_scale=False, *, exclude_border=False):
    r"""Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : scalar or sequence of scalars, optional
        the minimum standard deviation for Gaussian kernel. Keep this low to
        detect smaller blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    max_sigma : scalar or sequence of scalars, optional
        The maximum standard deviation for Gaussian kernel. Keep this high to
        detect larger blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    thresholds : sequence of float, optional
        The absolute lower bounds for scale space maxima, one detection pass
        per value. Local maxima smaller than a given threshold are ignored.
        Reduce these to detect blobs with lower intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.
    exclude_border : int or bool, optional
        If nonzero int, `exclude_border` excludes blobs from
        within `exclude_border`-pixels of the border of the image.

    Returns
    -------
    A : array of (n, image.ndim + number of sigmas) ndarrays
        One pruned blob array per entry in `thresholds`. Each row holds 2
        coordinate values for a 2D image (3 for a 3D image) plus the sigma(s)
        used.
        When a single sigma is passed, outputs are:
        ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
        ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
        deviation of the Gaussian kernel which detected the blob. When an
        anisotropic gaussian is used (sigmas per dimension), the detected sigma
        is returned for each dimension.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 266.        ,  115.        ,   11.88888889],
           [ 263.        ,  302.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 260.        ,  174.        ,   17.33333333],
           [ 198.        ,  155.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 197.        ,   44.        ,   11.88888889],
           [ 194.        ,  276.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 185.        ,  344.        ,   17.33333333],
           [ 128.        ,  154.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 124.        ,  336.        ,   11.88888889],
           [ 121.        ,  272.        ,   17.33333333],
           [ 113.        ,  323.        ,    1.        ]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
    a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
    """
    image = img_as_float(image)

    # if both min and max sigma are scalar, function returns only one sigma
    scalar_sigma = (
        True if np.isscalar(max_sigma) and np.isscalar(min_sigma) else False
    )

    # Gaussian filter requires that sequence-type sigmas have same
    # dimensionality as image. This broadcasts scalar kernels
    if np.isscalar(max_sigma):
        max_sigma = np.full(image.ndim, max_sigma, dtype=float)
    if np.isscalar(min_sigma):
        min_sigma = np.full(image.ndim, min_sigma, dtype=float)

    # Convert sequence types to array
    min_sigma = np.asarray(min_sigma, dtype=float)
    max_sigma = np.asarray(max_sigma, dtype=float)

    if log_scale:
        start, stop = np.log10(min_sigma)[:, None], np.log10(max_sigma)[:, None]
        space = np.concatenate(
            [start, stop, np.full_like(start, num_sigma)], axis=1)
        sigma_list = np.stack([np.logspace(*s) for s in space], axis=1)
    else:
        scale = np.linspace(0, 1, num_sigma)[:, None]
        sigma_list = scale * (max_sigma - min_sigma) + min_sigma

    import time
    start = time.time()
    # computing gaussian laplace
    # average s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2
                 for s in np.mean(sigma_list, axis=1)]

    image_cube = np.stack(gl_images, axis=-1)
    # print("LoG filter took {}s".format(round(time.time() - start, 2)))
    ret = list()
    times = list()
    for threshold in thresholds:
        start = time.time()
        local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=exclude_border)

        # Catch no peaks
        if local_maxima.size == 0:
            ret.append(np.empty((0, 4)))
            continue

        # Convert local_maxima to float64
        lm = local_maxima.astype(np.float64)

        # translate final column of lm, which contains the index of the
        # sigma that produced the maximum intensity value, into the sigma
        sigmas_of_peaks = sigma_list[local_maxima[:, -1]]

        if scalar_sigma:
            # select one sigma column, keeping dimension
            sigmas_of_peaks = sigmas_of_peaks[:, 0:1]

        # Remove sigma index and replace with sigmas
        lm = np.hstack([lm[:, :-1], sigmas_of_peaks])

        ret.append(_prune_blobs(lm, overlap))
        times.append(time.time() - start)
    # print("Thresholds took on avg: {}s".format(round(np.average(np.array(times)),2)))
    return np.asarray(ret)
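# Hedged usage sketch for the threshold-sweep variant above (not part of the
# original module): `_demo_blob_log_threshold_sweep` is a hypothetical helper,
# and the sample image comes from scikit-image as in the docstring example.
def _demo_blob_log_threshold_sweep():
    from skimage import data, exposure
    img = exposure.equalize_hist(data.coins())  # improves detection
    thresholds = [0.3]  # more values can be appended; each yields its own blob array
    per_threshold = blob_log(img, thresholds=thresholds)
    for thr, blobs in zip(thresholds, per_threshold):
        # each entry is an (n, 3) array of (row, col, sigma) for that threshold
        print(thr, len(blobs))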


# def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
#              overlap=.5, log_scale=False):
#     """Finds blobs in the given grayscale image.

#     Blobs are found using the Determinant of Hessian method [1]_. For each blob
#     found, the method returns its coordinates and the standard deviation
#     of the Gaussian Kernel used for the Hessian matrix whose determinant
#     detected the blob. Determinant of Hessians is approximated using [2]_.

#     Parameters
#     ----------
#     image : 2D ndarray
#         Input grayscale image.Blobs can either be light on dark or vice versa.
#     min_sigma : float, optional
#         The minimum standard deviation for Gaussian Kernel used to compute
#         Hessian matrix. Keep this low to detect smaller blobs.
#     max_sigma : float, optional
#         The maximum standard deviation for Gaussian Kernel used to compute
#         Hessian matrix. Keep this high to detect larger blobs.
#     num_sigma : int, optional
#         The number of intermediate values of standard deviations to consider
#         between `min_sigma` and `max_sigma`.
#     threshold : float, optional.
#         The absolute lower bound for scale space maxima. Local maxima smaller
#         than thresh are ignored. Reduce this to detect less prominent blobs.
#     overlap : float, optional
#         A value between 0 and 1. If the area of two blobs overlaps by a
#         fraction greater than `threshold`, the smaller blob is eliminated.
#     log_scale : bool, optional
#         If set intermediate values of standard deviations are interpolated
#         using a logarithmic scale to the base `10`. If not, linear
#         interpolation is used.

#     Returns
#     -------
#     A : (n, 3) ndarray
#         A 2d array with each row representing 3 values, ``(y,x,sigma)``
#         where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
#         standard deviation of the Gaussian kernel of the Hessian Matrix whose
#         determinant detected the blob.

#     References
#     ----------
#     .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian

#     .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
#            "SURF: Speeded Up Robust Features"
#            ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf

#     Examples
#     --------
#     >>> from skimage import data, feature
#     >>> img = data.coins()
#     >>> feature.blob_doh(img)
#     array([[ 270.        ,  363.        ,   30.        ],
#            [ 265.        ,  113.        ,   23.55555556],
#            [ 262.        ,  243.        ,   23.55555556],
#            [ 260.        ,  173.        ,   30.        ],
#            [ 197.        ,  153.        ,   20.33333333],
#            [ 197.        ,   44.        ,   20.33333333],
#            [ 195.        ,  100.        ,   23.55555556],
#            [ 193.        ,  275.        ,   23.55555556],
#            [ 192.        ,  212.        ,   23.55555556],
#            [ 185.        ,  348.        ,   30.        ],
#            [ 156.        ,  302.        ,   30.        ],
#            [ 126.        ,  153.        ,   20.33333333],
#            [ 126.        ,  101.        ,   20.33333333],
#            [ 124.        ,  336.        ,   20.33333333],
#            [ 123.        ,  205.        ,   20.33333333],
#            [ 123.        ,   44.        ,   23.55555556],
#            [ 121.        ,  271.        ,   30.        ]])

#     Notes
#     -----
#     The radius of each blob is approximately `sigma`.
#     Computation of Determinant of Hessians is independent of the standard
#     deviation. Therefore detecting larger blobs won't take more time. In
#     methods line :py:meth:`blob_dog` and :py:meth:`blob_log` the computation
#     of Gaussians for larger `sigma` takes more time. The downside is that
#     this method can't be used for detecting blobs of radius less than `3px`
#     due to the box filters used in the approximation of Hessian Determinant.
#     """
#     assert_nD(image, 2)

#     image = img_as_float(image)
#     image = integral_image(image)

#     if log_scale:
#         start, stop = log(min_sigma, 10), log(max_sigma, 10)
#         sigma_list = np.logspace(start, stop, num_sigma)
#     else:
#         sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

#     hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
#     image_cube = np.dstack(hessian_images)

#     local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
#                                   footprint=np.ones((3,) * image_cube.ndim),
#                                   threshold_rel=0.0,
#                                   exclude_border=False)

#     # Catch no peaks
#     if local_maxima.size == 0:
#         return np.empty((0, 3))
#     # Convert local_maxima to float64
#     lm = local_maxima.astype(np.float64)
#     # Convert the last index to its corresponding scale value
#     lm[:, -1] = sigma_list[local_maxima[:, -1]]
#     return _prune_blobs(lm, overlap)
Esempio n. 30
0
    def _update_spline_plot(self):
        """Update the spline plot."""
        knots = np.mgrid[0:1:((self.num_internal_knots + 2) * 1j)][1:-1]
        medial_repr = self.current_object.aligned_version.medial_repr
        dependent_variable = np.mgrid[0:1:(medial_repr.length * 1j)]
        laplacian = ndimage.gaussian_laplace(medial_repr.width_curve,
                                             self.smoothing,
                                             mode='constant',
                                             cval=np.nan)
        m_spline = LSQUnivariateSpline(dependent_variable,
                                       medial_repr.medial_axis, knots)
        w_spline = LSQUnivariateSpline(dependent_variable,
                                       medial_repr.width_curve, knots)
        # sample at double the frequency
        spl_dep_var = np.mgrid[0:1:(medial_repr.length * 2j)]
        plots = self.plots
        if plots is None:
            # Render the plot for the first time.
            plotdata = ArrayPlotData(
                medial_x=dependent_variable,
                medial_y=medial_repr.medial_axis,
                width_x=dependent_variable,
                width_y=medial_repr.width_curve,
                medial_spline_x=spl_dep_var,
                medial_spline_y=m_spline(spl_dep_var),
                width_spline_x=spl_dep_var,
                width_spline_y=w_spline(spl_dep_var),
                laplacian_y=laplacian,
            )
            plot = Plot(plotdata)

            # Width data
            self._width_data_renderer, = plot.plot(
                ("width_x", "width_y"),
                type="line",
                color="blue",
                name="Original width curve data")

            filterdata = ArrayPlotData(x=dependent_variable,
                                       laplacian=laplacian)
            filterplot = Plot(filterdata)
            self._laplacian_renderer, = filterplot.plot(
                ("x", "laplacian"),
                type="line",
                color="black",
                name="Laplacian-of-Gaussian")

            # Titles for plot & axes
            plot.title = "Width curves"
            plot.x_axis.title = "Normalized position on medial axis"
            plot.y_axis.title = "Fraction of medial axis width"

            # Legend mangling stuff
            legend = plot.legend
            plot.legend = None
            legend.set(component=None,
                       visible=True,
                       resizable="",
                       auto_size=True,
                       bounds=[250, 70],
                       padding_top=plot.padding_top)

            filterlegend = filterplot.legend
            filterplot.legend = None
            filterlegend.set(component=None,
                             visible=True,
                             resizable="",
                             auto_size=True,
                             bounds=[250, 50],
                             padding_top=filterplot.padding_top)

            self.plots = GridPlotContainer(plot,
                                           legend,
                                           filterplot,
                                           filterlegend,
                                           shape=(2, 2),
                                           valign="top",
                                           bgcolor="transparent")

        else:

            # Update the real width curve
            self._width_data_renderer.index.set_data(dependent_variable)
            self._width_data_renderer.value.set_data(medial_repr.width_curve)

            # Render the Laplacian
            self._laplacian_renderer.index.set_data(dependent_variable)
            self._laplacian_renderer.value.set_data(laplacian)
Esempio n. 31
0
def extract_data(img_rgb,laplacians,df,dir_input):
	image_gray = rgb2gray(img_rgb)

	img_red = img_rgb[:,:,0]
	img_red = gaussian(img_red, 3)
	s = 1.5/sqrt(2)
	img_log = ndimage.gaussian_laplace(img_red, s) * s ** 2
	img_log = np.float32(img_log)

	t_scales,row,col = laplacians.shape

	training_set = np.array([])

	#fig, ax = plt.subplots(nrows=1, ncols=1)
	#ax.set_title("RGB Image")
	#ax.imshow(image_gray,interpolation='nearest')
	for index,rows in df.iterrows():
		x = rows['x']#col
		y = rows['y']#fila
		w = rows['w']
		h = rows['h']
		label = rows['label']
		
		mask = np.zeros(image_gray.shape)

		if label == 'juvenil':
			r = w/2 if w < h else h/2
			s = np.linspace(0, 2*np.pi, 400)
			px = x + r + r*np.cos(s)
			py = y + r + r*np.sin(s)
			init = np.array([px, py]).T
			snake = segmentation.active_contour(img_log,init,alpha=0.5, beta=0.5,w_line=-0.4)
			px,py = snake[:,0].astype(int),snake[:,1].astype(int)
			#ax.plot(snake[:, 0], snake[:, 1], '-r', lw=3)
			#plt.show()
		else:
			px = [ i for i in range(x,x+w)]
			py = [ i for i in range(y,y+h)]
		
		"""
		py = [ i for i in range(x,x+w)]
		px = [ i for i in range(y,y+h)]
		"""
		positions = []
		for x,y in itertools.product(px,py):
			positions.append([x,y])
		positions = np.array(positions)
		px,py = positions[:,0],positions[:,1]

		features = np.array([])
		region = laplacians[:,py,px]

		for scale in range(t_scales):
			mean = np.mean(region[scale,:])
			std = np.std(region[scale,:])
			data = np.array([mean,std])
			features = np.vstack([features, data ]) if features.size else data
		
		r = img_rgb[py,px,0]
		g = img_rgb[py,px,1]
		b = img_rgb[py,px,2]

		color = np.mean(image_gray[py,px])
		r_color = np.mean(r)
		g_color = np.mean(g)
		b_color = np.mean(b)

		features = np.insert(features,0,color)
		features = np.insert(features,1,r_color)
		features = np.insert(features,2,g_color)
		features = np.insert(features,3,b_color)
		
		training_set = np.vstack([training_set, features ]) if training_set.size else features
		
	attr = ["color","r","g","b"]
	for i in range(t_scales):
		attr.append("mean_" + str(i))
		attr.append("std_" + str(i))

	df_training = pd.DataFrame(data=training_set, columns=attr)
	df_training['label'] = df['label'].values

	filename = dir_input.split('.tif')[0]
	df_training.to_csv(filename + "_training_set.csv",index=False)
Esempio n. 32
0
def extract_features(image_number, slice_number):
    # extracts features for [image_number]_[slice_number]_t1.tif and [image_number]_[slice_number]_t2.tif
    # Input:
    # image_number - Which subject (scalar)
    # slice_number - Which slice (scalar)
    # Output:
    # X           - N x k dataset, where N is the number of pixels and k is the total number of features
    # features    - k x 1 cell array describing each of the k features

    base_dir = '../data/dataset_brains/'

    t1 = plt.imread(base_dir + str(image_number) + '_' + str(slice_number) +
                    '_t1.tif')
    t2 = plt.imread(base_dir + str(image_number) + '_' + str(slice_number) +
                    '_t2.tif')

    fig = plt.figure(figsize=(10, 10))
    ax1 = fig.add_subplot(181)
    ax1.imshow(t1)
    ax2 = fig.add_subplot(182)
    ax2.imshow(t2)

    n = t1.shape[0]
    features = ()

    #display image
    t1f = t1.flatten().T.astype(float)
    t1f = t1f.reshape(-1, 1)
    t2f = t2.flatten().T.astype(float)
    t2f = t2f.reshape(-1, 1)

    X = np.concatenate((t1f, t2f), axis=1)

    features += ('T1 intensity', )
    features += ('T2 intensity', )

    #------------------------------------------------------------------#
    # TODO: Extract more features and add them to X.
    # Don't forget to provide (short) descriptions for the features

    #add blurred images to features
    t1b = ndimage.gaussian_filter(t1, sigma=2)  #blurred t1
    t2b = ndimage.gaussian_filter(t2, sigma=2)  #blurred t2

    #display altered images
    ax3 = fig.add_subplot(183)
    ax3.imshow(t1b)
    ax4 = fig.add_subplot(184)
    ax4.imshow(t2b)

    t1b = t1b.flatten().T.astype(float)
    t1b = t1b.reshape(-1, 1)
    t2b = t2b.flatten().T.astype(float)
    t2b = t2b.reshape(-1, 1)

    X = np.concatenate((X, t1b), axis=1)
    X = np.concatenate((X, t2b), axis=1)

    features += ('T1 blurred intensity', )
    features += ('T2 blurred intensity', )

    #minimum filter
    t1min = ndimage.minimum_filter(t1, size=5)
    t2min = ndimage.minimum_filter(t2, size=5)

    ax5 = fig.add_subplot(185)
    ax5.imshow(t1min)
    ax6 = fig.add_subplot(186)
    ax6.imshow(t2min)

    t1min = t1min.flatten().T.astype(float)
    t1min = t1min.reshape(-1, 1)
    t2min = t2min.flatten().T.astype(float)
    t2min = t2min.reshape(-1, 1)

    X = np.concatenate((X, t1min), axis=1)
    X = np.concatenate((X, t2min), axis=1)

    features += ('T1 minimum intensity', )
    features += ('T2 minimum intensity', )

    #maximum filter
    t1max = ndimage.maximum_filter(t1, size=5)
    t2max = ndimage.maximum_filter(t2, size=5)

    ax7 = fig.add_subplot(187)
    ax7.imshow(t1max)
    ax8 = fig.add_subplot(188)
    ax8.imshow(t2max)

    t1max = t1max.flatten().T.astype(float)
    t1max = t1max.reshape(-1, 1)
    t2max = t2max.flatten().T.astype(float)
    t2max = t2max.reshape(-1, 1)

    X = np.concatenate((X, t1max), axis=1)
    X = np.concatenate((X, t2max), axis=1)

    features += ('T1 maximum intensity', )
    features += ('T2 maximum intensity', )

    #gaussian gradient magnitude
    t1_ggm = ndimage.gaussian_gradient_magnitude(t1, sigma=2)
    t2_ggm = ndimage.gaussian_gradient_magnitude(t2, sigma=2)

    fig1 = plt.figure(figsize=(10, 10))
    ax9 = fig1.add_subplot(181)
    ax9.imshow(t1_ggm)
    ax10 = fig1.add_subplot(182)
    ax10.imshow(t2_ggm)

    t1_ggm = t1_ggm.flatten().T.astype(float)
    t1_ggm = t1_ggm.reshape(-1, 1)
    t2_ggm = t2_ggm.flatten().T.astype(float)
    t2_ggm = t2_ggm.reshape(-1, 1)

    X = np.concatenate((X, t1_ggm), axis=1)
    X = np.concatenate((X, t2_ggm), axis=1)

    features += ('T1 gaussian gradient magnitude intensity', )
    features += ('T2 gaussian gradient magnitude intensity', )

    #gaussian Laplace (second derivative)
    t1_glp = ndimage.gaussian_laplace(t1, sigma=1)
    t2_glp = ndimage.gaussian_laplace(t2, sigma=1)

    ax11 = fig1.add_subplot(183)
    ax11.imshow(t1_glp)
    ax12 = fig1.add_subplot(184)
    ax12.imshow(t2_glp)

    t1_glp = t1_glp.flatten().T.astype(float)
    t1_glp = t1_glp.reshape(-1, 1)
    t2_glp = t2_glp.flatten().T.astype(float)
    t2_glp = t2_glp.reshape(-1, 1)

    X = np.concatenate((X, t1_glp), axis=1)
    X = np.concatenate((X, t2_glp), axis=1)

    features += ('T1 gaussian Laplace intensity', )
    features += ('T2 gaussian Laplace intensity', )

    #median filter (smoothing filter)
    t1_median = ndimage.median_filter(t1, size=5)
    t2_median = ndimage.median_filter(t2, size=5)

    ax13 = fig1.add_subplot(185)
    ax13.imshow(t1_median)
    ax14 = fig1.add_subplot(186)
    ax14.imshow(t2_median)

    t1_median = t1_median.flatten().T.astype(float)
    t1_median = t1_median.reshape(-1, 1)
    t2_median = t2_median.flatten().T.astype(float)
    t2_median = t2_median.reshape(-1, 1)

    X = np.concatenate((X, t1_median), axis=1)
    X = np.concatenate((X, t2_median), axis=1)

    features += ('T1 median intensity', )
    features += ('T2 median intensity', )

    #sobel filter (edge detection, derivative filter, horizontal/vertical lines, some smoothing)
    t1_sobel = ndimage.sobel(t1)
    t2_sobel = ndimage.sobel(t2)

    ax15 = fig1.add_subplot(187)
    ax15.imshow(t1_sobel)
    ax16 = fig1.add_subplot(188)
    ax16.imshow(t2_sobel)

    t1_sobel = t1_sobel.flatten().T.astype(float)
    t1_sobel = t1_sobel.reshape(-1, 1)
    t2_sobel = t2_sobel.flatten().T.astype(float)
    t2_sobel = t2_sobel.reshape(-1, 1)

    X = np.concatenate((X, t1_sobel), axis=1)
    X = np.concatenate((X, t2_sobel), axis=1)

    features += ('T1 sobel intensity', )
    features += ('T2 sobel intensity', )

    # rank filter (smoothing filter)
    t1_rank = ndimage.rank_filter(t1, rank=30, size=10)
    t2_rank = ndimage.rank_filter(t2, rank=30, size=10)

    fig2 = plt.figure(figsize=(10, 10))
    ax17 = fig2.add_subplot(181)
    ax17.imshow(t1_rank)
    ax18 = fig2.add_subplot(182)
    ax18.imshow(t2_rank)

    t1_rank = t1_rank.flatten().T.astype(float)
    t1_rank = t1_rank.reshape(-1, 1)
    t2_rank = t2_rank.flatten().T.astype(float)
    t2_rank = t2_rank.reshape(-1, 1)

    X = np.concatenate((X, t1_rank), axis=1)
    X = np.concatenate((X, t2_rank), axis=1)

    features += ('T1 rank intensity', )
    features += ('T2 rank intensity', )

    # prewitt filter (edge detection, derivative filter, horizontal/vertical lines, some smoothing)
    # looks like the sobel filter but uses a different kernel; detects only horizontal or vertical lines
    t1_prewitt = ndimage.prewitt(t1)
    t2_prewitt = ndimage.prewitt(t2)

    ax19 = fig2.add_subplot(183)
    ax19.imshow(t1_prewitt)
    ax20 = fig2.add_subplot(184)
    ax20.imshow(t2_prewitt)

    t1_prewitt = t1_prewitt.flatten().T.astype(float)
    t1_prewitt = t1_prewitt.reshape(-1, 1)
    t2_prewitt = t2_prewitt.flatten().T.astype(float)
    t2_prewitt = t2_prewitt.reshape(-1, 1)

    X = np.concatenate((X, t1_prewitt), axis=1)
    X = np.concatenate((X, t2_prewitt), axis=1)

    features += ('T1 prewitt intensity', )
    features += ('T2 prewitt intensity', )

    #------------------------------------------------------------------#
    return X, features
Esempio n. 33
0
def enhance_edges(img, HPASS, NUCRAD):
    img = simple_highpassfilter(img.astype(np.float64), HPASS)
    lapgauss_img = -gaussian_laplace(img.astype(np.float32), NUCRAD / 3) * (
        NUCRAD / 3)**2
    edge = -lapgauss_img
    return edge
Esempio n. 34
0
def blob_log(image,
             sigma_list=None,
             scale_choice='linear',
             min_sigma=1,
             max_sigma=50,
             num_sigma=10,
             threshold=.2,
             overlap=.5,
             sigma_ratio=2.,
             weighting=None,
             merge_overlap_dist=1.0,
             refine_shape=False,
             use_max_response=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    sigma_list : np.ndarray, optional
        Provide the list of sigmas to use.
    scale_choice : str, optional
        'log', 'linear' or 'ratio'. Determines how the scales are calculated
        based on the given number, minimum, maximum, or ratio.
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    weighting : np.ndarray, optional
        Used to weight certain scales differently when selecting local maxima
        in the transform space. For example when searching for regions near
        the beam size, the transform can be down-weighted to avoid spurious
        detections. Must have the same number of elements as the scales.
    merge_overlap_dist : float, optional
        Controls the minimum overlap regions must have to be merged together.
        Defaults to one sigma separation, where sigma is one of the scales
        used in the transform.
    sigma_ratio : float, optional
        Ratio between successive scales when `scale_choice` is 'ratio'.
    refine_shape : bool, optional
        If True, refine each detection into an elliptical shape from the
        blob's image moments.
    use_max_response : bool, optional
        If True, local maxima are found in the maximum of the transform over
        all scales, and the peak response is kept with each blob.

    Returns
    -------
    A : (n, 5) ndarray
        A 2d array with each row representing 5 values,
        ``(y, x, semi-major sigma, semi-minor sigma, pa)``
        where ``(y,x)`` are coordinates of the blob, ``semi-major sigma`` and
        ``semi-minor sigma`` are the standard deviations of the elliptical blob,
        and ``pa`` is its position angle.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 113.        ,  323.        ,    1.        ],
           [ 121.        ,  272.        ,   17.33333333],
           [ 124.        ,  336.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 128.        ,  154.        ,   11.88888889],
           [ 185.        ,  344.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 194.        ,  276.        ,   17.33333333],
           [ 197.        ,   44.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 198.        ,  155.        ,   11.88888889],
           [ 260.        ,  174.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 263.        ,  302.        ,   17.33333333],
           [ 266.        ,  115.        ,   11.88888889]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
    """

    # assert_nD(image, 2)

    image = img_as_float(image)

    if sigma_list is None:
        if scale_choice == 'log':
            start, stop = log(min_sigma, 10), log(max_sigma, 10)
            sigma_list = np.logspace(start, stop, num_sigma)
        elif scale_choice == 'linear':
            sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
        elif scale_choice == 'ratio':
            # k such that min_sigma*(sigma_ratio**k) > max_sigma
            k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
            sigma_list = np.array(
                [min_sigma * (sigma_ratio**i) for i in range(k)])
        else:
            raise ValueError("scale_choice must be 'log', 'linear', or "
                             "'ratio'.")

    if weighting is not None:
        if len(weighting) != len(sigma_list):
            raise IndexError("weighting must have the same number of elements"
                             " as scales (" + str(len(sigma_list)) + ").")
    else:
        weighting = np.ones_like(sigma_list)

    # computing gaussian laplace
    # s**2 provides scale invariance
    # weighting by w changes the relative importance of each transform scale
    gl_images = [
        gaussian_laplace(image, s) * s**2 * w
        for s, w in zip(sigma_list, weighting)
    ]
    image_cube = np.dstack(gl_images)

    if use_max_response:
        scale_peaks = \
            peak_local_max(image_cube.max(2),
                           threshold_abs=threshold,
                           threshold_rel=0.0,
                           min_distance=0.5 * np.sqrt(2) * sigma_list[0],
                           exclude_border=False)

        argmaxes = image_cube.argmax(2)[scale_peaks[:, 0], scale_peaks[:, 1]]
        radii = np.array([sigma_list[arg] for arg in argmaxes]) * np.sqrt(2)
        radii = radii[:, np.newaxis]

        responses = image_cube.max(2)[scale_peaks[:, 0], scale_peaks[:, 1]]
        responses = responses[:, np.newaxis]

        pas = np.zeros_like(radii)
        local_maxima = np.hstack([scale_peaks, radii, radii, pas, responses])
    else:
        for i, scale in enumerate(sigma_list):
            scale_peaks = peak_local_max(image_cube[:, :, i],
                                         threshold_abs=threshold,
                                         min_distance=np.sqrt(2) * scale,
                                         threshold_rel=0.0,
                                         exclude_border=False)

            new_scale_peaks = np.empty((len(scale_peaks), 5))
            if refine_shape:
                for j, peak in enumerate(scale_peaks):
                    new_peak = np.array([peak[0], peak[1], scale, scale, 0.0])
                    new_scale_peaks[j] = \
                        shape_from_blob_moments(new_peak, image_cube[:, :, i])
            else:
                new_scale_peaks[:, :2] = scale_peaks
                # sqrt(2) size correction
                new_scale_peaks[:, 2:4] = np.sqrt(2) * scale
                new_scale_peaks[:, 4] = 0.0
                vals = \
                    np.array([image_cube[pos[0], pos[1], i]
                              for pos in scale_peaks]).reshape((len(scale_peaks),
                                                                1))
                new_scale_peaks = np.hstack([new_scale_peaks, vals])

            if i == 0:
                local_maxima = new_scale_peaks
            else:
                local_maxima = np.vstack([local_maxima, new_scale_peaks])

    if local_maxima.size == 0:
        return local_maxima

    # Merge regions into ellipses
    local_maxima = _merge_blobs(local_maxima, merge_overlap_dist)

    # Then prune and return them
    return local_maxima
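# Standalone hedged illustration of the 'ratio' scale ladder used in the
# function above: k is chosen so that min_sigma * sigma_ratio**k just exceeds
# max_sigma, giving a geometric progression of scales. The numbers below are
# for illustration only.
from math import log

import numpy as np

min_sigma, max_sigma, sigma_ratio = 1.0, 50.0, 2.0
k = int(log(max_sigma / min_sigma, sigma_ratio)) + 1
sigma_list = np.array([min_sigma * sigma_ratio ** i for i in range(k)])
print(sigma_list)  # -> [ 1.  2.  4.  8. 16. 32.]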
Esempio n. 35
0
def laplacian_of_gaussian(img, sig):
    return nd.gaussian_laplace(img, sigma=sig)
Esempio n. 36
0
 def run(self, ips, snap, img, para=None):
     nimg.gaussian_laplace(snap, para['sigma'], output=img)
     img *= -1
     if para['uniform']:
         np.add(img, np.mean(ips.range), out=img, casting='unsafe')
Esempio n. 37
0
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.

    Returns
    -------
    A : (n, image.ndim + 1) ndarray
        A 2d array with each row representing 3 values for a 2D image,
        and 4 values for a 3D image: ``(r, c, sigma)`` or ``(p, r, c, sigma)``
        where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and
        ``sigma`` is the standard deviation of the Gaussian kernel which
        detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 266.        ,  115.        ,   11.88888889],
           [ 263.        ,  302.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 260.        ,  174.        ,   17.33333333],
           [ 198.        ,  155.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 197.        ,   44.        ,   11.88888889],
           [ 194.        ,  276.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 185.        ,  344.        ,   17.33333333],
           [ 128.        ,  154.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 124.        ,  336.        ,   11.88888889],
           [ 121.        ,  272.        ,   17.33333333],
           [ 113.        ,  323.        ,    1.        ]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
    a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
    """
    image = img_as_float(image)

    if log_scale:
        start, stop = log(min_sigma, 10), log(max_sigma, 10)
        sigma_list = np.logspace(start, stop, num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

    # computing gaussian laplace
    # s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]

    image_cube = np.stack(gl_images, axis=-1)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Catch no peaks
    if local_maxima.size == 0:
        return np.empty((0, 3))
    # Convert local_maxima to float64
    lm = local_maxima.astype(np.float64)
    # Convert the last index to its corresponding scale value
    lm[:, -1] = sigma_list[local_maxima[:, -1]]
    return _prune_blobs(lm, overlap)
Esempio n. 38
0
    # from skimage.external import tifffile
    # tif_path = r"C:\Users\Administrator\Desktop\realData\ins_gt(1).tif"
    # a = tifffile.imread(tif_path)

    # a = np.zeros((10, 10, 10))
    # a[3] = 1
    # a[2:9, 3:5, 4:6] = 1

    # 2-D test mask (the commented 3-D variants above are alternative inputs)
    a = np.zeros((100, 100))
    a[23:, 78:81] = 1
    a[13:, 23:28] = 1

    distance, ind = ndimage.distance_transform_edt(a, return_indices=True)
    # direct_field3D is an external helper from the surrounding project; it is
    # assumed to return one direction-field component per axis
    df = direct_field3D(a)

    gl0 = ndimage.gaussian_laplace(df[0, ...], sigma=1)
    gl1 = ndimage.gaussian_laplace(df[1, ...], sigma=1)
    sq = np.abs(gl0) + np.abs(gl1)

    import matplotlib.pyplot as plt
    gl0_ = (gl0 < -0.1).astype(int)
    gl1_ = (gl1 < -0.1).astype(int)
    gl_sq = (sq > 0.4).astype(int)

    # plt.imshow(np.stack([gl0_, gl1_]), cmap=plt.cm.gray)
    _, axs = plt.subplots(1, 3)
    axs[0].imshow(gl0_)
    axs[1].imshow(gl1_)
    axs[2].imshow(gl_sq)
    plt.show()
Esempio n. 39
0
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False, *, exclude_border=False):
    r"""Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : scalar or sequence of scalars, optional
        the minimum standard deviation for Gaussian kernel. Keep this low to
        detect smaller blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    max_sigma : scalar or sequence of scalars, optional
        The maximum standard deviation for Gaussian kernel. Keep this high to
        detect larger blobs. The standard deviations of the Gaussian filter
        are given for each axis as a sequence, or as a single number, in
        which case it is equal for all axes.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than thresh are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.
    exclude_border : int or bool, optional
        If nonzero int, `exclude_border` excludes blobs from
        within `exclude_border`-pixels of the border of the image.

    Returns
    -------
    A : (n, image.ndim + number of sigmas) ndarray
        A 2d array with each row representing 2 coordinate values for a 2D
        image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
        When a single sigma is passed, outputs are:
        ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
        ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
        deviation of the Gaussian kernel which detected the blob. When an
        anisotropic gaussian is used (sigmas per dimension), the detected sigma
        is returned for each dimension.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 266.        ,  115.        ,   11.88888889],
           [ 263.        ,  302.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 260.        ,  174.        ,   17.33333333],
           [ 198.        ,  155.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 197.        ,   44.        ,   11.88888889],
           [ 194.        ,  276.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 185.        ,  344.        ,   17.33333333],
           [ 128.        ,  154.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 124.        ,  336.        ,   11.88888889],
           [ 121.        ,  272.        ,   17.33333333],
           [ 113.        ,  323.        ,    1.        ]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
    a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
    """
    image = img_as_float(image)

    # if both min and max sigma are scalar, function returns only one sigma
    scalar_sigma = (
        True if np.isscalar(max_sigma) and np.isscalar(min_sigma) else False
    )

    # Gaussian filter requires that sequence-type sigmas have same
    # dimensionality as image. This broadcasts scalar kernels
    if np.isscalar(max_sigma):
        max_sigma = np.full(image.ndim, max_sigma, dtype=float)
    if np.isscalar(min_sigma):
        min_sigma = np.full(image.ndim, min_sigma, dtype=float)

    # Convert sequence types to array
    min_sigma = np.asarray(min_sigma, dtype=float)
    max_sigma = np.asarray(max_sigma, dtype=float)

    if log_scale:
        start, stop = np.log10(min_sigma)[:, None], np.log10(max_sigma)[:, None]
        space = np.concatenate(
            [start, stop, np.full_like(start, num_sigma)], axis=1)
        sigma_list = np.stack([np.logspace(*s) for s in space], axis=1)
    else:
        scale = np.linspace(0, 1, num_sigma)[:, None]
        sigma_list = scale * (max_sigma - min_sigma) + min_sigma

    # computing gaussian laplace
    # average s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2
                 for s in np.mean(sigma_list, axis=1)]

    image_cube = np.stack(gl_images, axis=-1)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=exclude_border)

    # Catch no peaks
    if local_maxima.size == 0:
        return np.empty((0, 3))

    # Convert local_maxima to float64
    lm = local_maxima.astype(np.float64)

    # translate final column of lm, which contains the index of the
    # sigma that produced the maximum intensity value, into the sigma
    sigmas_of_peaks = sigma_list[local_maxima[:, -1]]

    if scalar_sigma:
        # select one sigma column, keeping dimension
        sigmas_of_peaks = sigmas_of_peaks[:, 0:1]

    # Remove sigma index and replace with sigmas
    lm = np.hstack([lm[:, :-1], sigmas_of_peaks])

    return _prune_blobs(lm, overlap)
Esempio n. 40
0
def gaussianLaplaceBlobDetection(originalImage):
	originalImageArray = numpy.asarray(originalImage)
	blobDetectedImageArray = ndimage.gaussian_laplace(originalImageArray,sigma=2)
	blobDetectedImage = Image.fromarray(blobDetectedImageArray)
	return blobDetectedImage
Esempio n. 41
0
    def _update_spline_plot(self):
        """Update the spline plot."""
        knots = np.mgrid[0:1:((self.num_internal_knots + 2)*1j)][1:-1]
        medial_repr = self.current_object.aligned_version.medial_repr
        dependent_variable = np.mgrid[0:1:(medial_repr.length * 1j)]
        laplacian = ndimage.gaussian_laplace(medial_repr.width_curve, 
            self.smoothing, mode='constant', cval=np.nan)
        m_spline = LSQUnivariateSpline(dependent_variable,
            medial_repr.medial_axis, knots)
        w_spline = LSQUnivariateSpline(dependent_variable,
            medial_repr.width_curve, knots)
        # sample at double the frequency
        spl_dep_var = np.mgrid[0:1:(medial_repr.length * 2j)]
        plots = self.plots
        if plots is None:
            # Render the plot for the first time.
            plotdata = ArrayPlotData(medial_x=dependent_variable,
                medial_y=medial_repr.medial_axis,
                width_x=dependent_variable,
                width_y=medial_repr.width_curve,
                medial_spline_x=spl_dep_var,
                medial_spline_y=m_spline(spl_dep_var),
                width_spline_x=spl_dep_var,
                width_spline_y=w_spline(spl_dep_var),
                laplacian_y=laplacian,
            )
            plot = Plot(plotdata)
            
            
            # Width data 
            self._width_data_renderer, = plot.plot(("width_x", "width_y"),
                type="line", color="blue", name="Original width curve data")
            
            filterdata = ArrayPlotData(
                            x=dependent_variable,
                            laplacian=laplacian
                        )
            filterplot = Plot(filterdata)
            self._laplacian_renderer, = filterplot.plot(("x",
                            "laplacian"), type="line", color="black", 
                            name="Laplacian-of-Gaussian")
            
            # Titles for plot & axes
            plot.title = "Width curves"
            plot.x_axis.title = "Normalized position on medial axis"
            plot.y_axis.title = "Fraction of medial axis width"
                        
            # Legend mangling stuff
            legend = plot.legend
            plot.legend = None
            legend.set(
                    component = None,
                    visible = True,
                    resizable = "",
                    auto_size=True, 
                    bounds = [250, 70],
                    padding_top = plot.padding_top)
            
            filterlegend = filterplot.legend
            filterplot.legend = None
            filterlegend.set(
                    component = None,
                    visible = True,
                    resizable = "",
                    auto_size=True, 
                    bounds = [250, 50],
                    padding_top = filterplot.padding_top)
            
            self.plots = GridPlotContainer(plot, legend, filterplot,
                                        filterlegend, shape=(2,2),
                                        valign="top", bgcolor="transparent")
            
            
        else:

            # Update the real width curve
            self._width_data_renderer.index.set_data(dependent_variable)
            self._width_data_renderer.value.set_data(medial_repr.width_curve)
            
            # Render the Laplacian
            self._laplacian_renderer.index.set_data(dependent_variable)
            self._laplacian_renderer.value.set_data(laplacian)
Esempio n. 42
0
def gaussian_Laplace1(left):
    return ndimage.gaussian_laplace(left,sigma=1)
Esempio n. 43
0
def find_peaks(img, band_shape=(3, 25), show_images=False, save_images=False):
    """

    :param img: 2-D gel/band image in which to find peaks.
    :param band_shape: (height, width) aka (y, x) of the structuring element used for opening.
    :param show_images: if True, display each intermediate image.
    :param save_images: if True, also return the list of intermediate images.
    :return: peak positions, plus the intermediate images when save_images is True.
    """

    # Fixed: peaks are shifted, probably because opening() makes a shift from the structuring element.
    # Edit, no it is the convolution that does it...
    #
    img_org = img
    img = img.astype('f')  # cast to float, otherwise all calculations become inaccurate

    images = []
    band_selem = np.ones((3, 29))

    ploti = 0  # start at zero, and show_image will deal with it.
    if show_images:
        ploti = show_image(img, title="original", plotidx=ploti)

    title = "original"
    descr = "img"  # aka "history"
    images.append((img, title, descr))

    # #
    # # opening - small:
    # title, descr = "opening-3x21", "opening(%s)" % descr
    # print(title)
    # opened = opening(img, selem=np.ones((3, 21)))
    # # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(opened, title=title, plotidx=ploti)
    #
    # #
    # # opening - medium:
    # title, descr = "opening-3x25", "opening(%s)" % descr
    # print(title)
    # opened = opening(img, selem=np.ones((3, 25)))
    # # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(opened, title=title, plotidx=ploti)
    #
    #
    # opening - larger:
    title, descr = "opening-3x23", "opening(%s)" % descr
    print(title)
    img = opened1 = opening(img, selem=np.ones((3, 23)))
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # subtract global percentile:
    title = "subtract_global_percentile"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = minus_global_pct_bg = subtract_global_percentile(img, percentile=30)
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)



    #
    # rolling-minimum background subtraction with large ellipse:
    title = "rolling_5percentile_bg_el"
    descr = "%s(%s)" % (title, descr)
    print(title)
    rol_min_el = rolling_minimum_background(gaussian_filter(img, sigma=2), percentile=5,
                                             size=(71, 71),  # height, width (should be odd integers)
                                             geometry='ellipse')
    if show_images:
        ploti = show_image(rol_min_el, title=title, plotidx=ploti, clim_percentile=99.9)
    # subtract the background:
    title = "minus-rol_min_el"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = np.clip(img - rol_min_el, 0, None)  # remember to clip at zero
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # rolling-minimum background subtraction
    title = "rolling_5percentile_bg"
    descr = "%s(%s)" % (title, descr)
    print(title)
    rol_min_bg = rolling_minimum_background(gaussian_filter(img, sigma=2), percentile=5)
    if show_images:
        ploti = show_image(rol_min_bg, title=title, plotidx=ploti, clim_percentile=99.9)
    # subtract the background:
    title = "minus-rol_min_bg"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = np.clip(img - rol_min_bg, 0, None)  # remember to clip at zero
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)
    print("np.all(rol_min_bg == rol_min_el):", np.all(rol_min_bg == rol_min_el))

    # #
    # # subtract_row_col_percentile background subtraction:
    # title = "subtract_row_col_percentile"
    # descr = "%s(%s)" % (title, descr)
    # print(title)
    # img = background_subtracted_rcp = subtract_row_col_percentile(
    #     img, percentile=30, filters=savgol_filter, window_length=11, polyorder=1
    # )
    # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(img, title=title, plotidx=ploti)

    #
    # opening, again:
    title, descr = "opening-%sx%s" % band_shape, "opening(%s)" % descr
    print(title)
    img = opened2 = opening(img, selem=np.ones(band_shape))
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # convolve:
    # default mode='full' will shift output, use mode='same' to prevent shifting
    title, descr = "convolved", "convolved(%s)" % descr
    print(title)
    img = convolved = convolve2d(img, band_selem/band_selem.sum(), mode='same')
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # low-percentile filter to narrow the bands:
    # (don't apply further openings or band-shape specific convolutions after narrowing the bands!)
    size = (3, 21)
    title = "pct_filtered-%sx%s" % size
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = pct_filtered = percentile_filter(img, percentile=10, size=size)
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # gaussian:
    title, descr = "gaussian_filter", "gaussian_filter(%s)" % descr
    print(title)
    img = gaussianed = gaussian_filter(img, sigma=1)
    images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(img, title="gaussianed", plotidx=ploti)

    # Peaks!
    print("Finding peaks...")
    peak_pos = peak_local_max(img,
                              min_distance=10,
                              # threshold_abs=3,
                              # threshold_rel=0.01  # values must be 0.01 * maximum_value
                             )
    print("peak_pos.shape", peak_pos.shape)

    #
    # Draw peaks on a copy of the image:
    img = img.copy()  # otherwise we will write on convolved
    for pos in peak_pos:
        draw_rectangle(img, pos, width=2, val=255, border=0, center_val=None)
    ploti = show_image(img, title="peaks", plotidx=ploti)

    #
    # Other visualizations:
    ggm_filtered = gaussian_gradient_magnitude(convolved, sigma=1.0)
    ploti = show_image(ggm_filtered, title="gaussian_gradient_magnitude",
                       plotidx=ploti, clim=(0, np.percentile(ggm_filtered, 99.9)))

    title, descr = "glaplace of convolved", "laplace of convolved"
    # laplaced = laplace(convolved)
    laplaced = gaussian_laplace(convolved, sigma=2)
    ploti = show_image(laplaced, title=title, plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(1, 99))

    lggm = laplaced*(ggm_filtered-3)
    ploti = show_image(lggm, title="laplaced*(ggm_filtered-3)", plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(1, 99))

    title, descr = "glaplace of opened1", "laplace of opened1"
    # laplaced = laplace(convolved)
    laplaced = gaussian_laplace(opened1, sigma=2)
    ploti = show_image(laplaced, title=title, plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(10, 90))

    if save_images:
        return peak_pos, images
    else:
        return peak_pos
Esempio n. 44
0
def gaussian_second_deri(image):
    """
    TODO: sigma should be taken as an input parameter here. Uses Gaussian second derivatives (Laplacian of Gaussian).
    """
    return ndimage.gaussian_laplace(image, sigma=2)
Esempio n. 45
0
def LoG(image, sigma):
    result = ndimage.gaussian_laplace(image, sigma)

    return result
Esempio n. 46
0
    def SpotDetector(self, input_image, AnalysisGui, nuclei_image,
                     spot_channel):

        self.UPDATE_SPOT_ANALYSIS_PARAMS()
        if AnalysisGui.spotchannelselect.currentText() == 'All':

            params_to_pass = self.spot_params_dict['Ch1']
        else:
            params_to_pass = self.spot_params_dict[spot_channel]

        uint8_max_val = 255

        ## First blurring round
        median_img = cv2.medianBlur(nuclei_image, 11)
        ## Thresholding and Binarizing
        ret, thresh = cv2.threshold(median_img, 0, uint8_max_val,
                                    cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        bin_img = (1 - thresh / uint8_max_val).astype('bool')
        ## Binary image filling
        filled = ndimage.binary_fill_holes(bin_img)
        struct = ndimage.generate_binary_structure(2, 2)
        filled = ndimage.binary_dilation(filled,
                                         structure=struct).astype(filled.dtype)
        filled = ndimage.binary_dilation(filled,
                                         structure=struct).astype(filled.dtype)
        #### this part is for removing bright junk in the image################
        labeled_nuc, num_features_nuc = label(filled)
        props = regionprops_table(labeled_nuc,
                                  input_image,
                                  properties=('label', 'area', 'max_intensity',
                                              'mean_intensity'))
        props_df = pd.DataFrame(props)
        mean_intensity_ave = props_df['mean_intensity'].mean()
        max_intensity_max = props_df['max_intensity'].max()
        for ind, row in props_df.iterrows():

            if row['mean_intensity'] > 2 * mean_intensity_ave:

                input_image[labeled_nuc == row['label']] = 0
        input_image[input_image > max_intensity_max] = 0
        input_image = cv2.normalize(input_image,
                                    None,
                                    0,
                                    255,
                                    cv2.NORM_MINMAX,
                                    dtype=cv2.CV_8U)
        ########################################
        sig = params_to_pass[3]
        if str(params_to_pass[0]) == '0':

            log_result = ndimage.gaussian_laplace(input_image, sigma=sig)
            if str(params_to_pass[1]) == '0':

                ret_log, thresh_log = cv2.threshold(
                    log_result, 0, 255,
                    cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
                bin_img_log = (1 - thresh_log / 255).astype('bool')

            if str(params_to_pass[1]) == '1':

                manual_threshold = np.ceil(params_to_pass[2] *
                                           2.55).astype(int)
                thresh_log = log_result > manual_threshold
                bin_img_log = thresh_log
            # bin_img_log = (1 - thresh_log / 255).astype('bool')
            spots_img_log = (bin_img_log * 255).astype('uint8')
            kernel = np.ones((3, 3), np.uint8)
            spot_openned_log = cv2.morphologyEx(spots_img_log, cv2.MORPH_OPEN,
                                                kernel)
            final_spots = np.multiply(spot_openned_log, filled)

        if str(params_to_pass[0]) == '1':

            result_gaussian = ndimage.gaussian_filter(input_image, sigma=sig)

            if str(params_to_pass[1]) == '0':

                ret_log, thresh_log = cv2.threshold(
                    result_gaussian, 0, 255,
                    cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
                bin_img_g = (1 - thresh_log / 255).astype('bool')

            if str(params_to_pass[1]) == '1':

                manual_threshold = np.ceil(params_to_pass[2] *
                                           2.55).astype(int)

                thresh_log = result_gaussian > manual_threshold
                bin_img_g = thresh_log

            spots_img_g = (bin_img_g * 255).astype('uint8')
            kernel = np.ones((3, 3), np.uint8)
            spot_openned_g = cv2.morphologyEx(spots_img_g, cv2.MORPH_OPEN,
                                              kernel)
            final_spots = np.multiply(spot_openned_g, filled)

        ### center of mass calculation
        if str(AnalysisGui.SpotLocationCbox.currentIndex()) == '0':

            labeled_spots, num_features = label(final_spots)
            spot_labels = np.unique(labeled_spots)

            bin_img = (final_spots / uint8_max_val).astype('bool')
            ## Binary image filling
            masked_spots = np.multiply(input_image, bin_img)

            spot_locations = ndimage.measurements.center_of_mass(
                masked_spots, labeled_spots, spot_labels[spot_labels > 0])

            ###### Brightest spot calculation
        if str(AnalysisGui.SpotLocationCbox.currentIndex()) == '1':

            labeled_spots, num_features = label(final_spots)
            spot_labels = np.unique(labeled_spots)
            bin_img = (final_spots / uint8_max_val).astype('bool')
            masked_spots = np.multiply(input_image, bin_img)
            spot_locations = peak_local_max(masked_spots,
                                            labels=labeled_spots,
                                            num_peaks_per_label=1)

            ##### Centroid calculation
        if str(AnalysisGui.SpotLocationCbox.currentIndex()) == '2':

            labeled_spots, num_features = label(final_spots)
            spot_labels = np.unique(labeled_spots)

            spot_locations = ndimage.measurements.center_of_mass(
                final_spots, labeled_spots, spot_labels[spot_labels > 0])

        return spot_locations, final_spots
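The LoG branch of SpotDetector above reduces to: LoG-filter the channel, threshold (Otsu or manual), clean up with a morphological opening, restrict to the nucleus mask, and report one location per connected spot. A minimal sketch of that sequence using scipy/skimage only (no GUI state); the function name, parameters, and the Otsu/opening choices here are illustrative assumptions, not the class's exact configuration:

import numpy as np
from scipy import ndimage
from skimage.filters import threshold_otsu
from skimage.morphology import binary_opening

def detect_spots_log(channel_img, nucleus_mask, sigma=2):
    log_img = -ndimage.gaussian_laplace(channel_img.astype(float), sigma=sigma)  # bright spots -> positive
    spots = log_img > threshold_otsu(log_img)                                    # global Otsu threshold
    spots = binary_opening(spots, np.ones((3, 3), dtype=bool))                   # drop single-pixel noise
    spots &= nucleus_mask.astype(bool)                                           # keep only spots inside nuclei
    labeled, num = ndimage.label(spots)
    centers = ndimage.center_of_mass(spots, labeled, np.arange(1, num + 1))      # one (row, col) per spot
    return centers, spots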
Example No. 47
0
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input grayscale image; blobs are assumed to be light on a dark
        background (white on black).
    min_sigma : float, optional
        The minimum standard deviation for the Gaussian kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for the Gaussian kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale-space maxima. Local maxima smaller
        than `threshold` are ignored. Reduce this to detect blobs with lower
        intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. Otherwise, linear
        interpolation is used.

    Returns
    -------
    A : (n, image.ndim + 1) ndarray
        A 2d array with each row representing 3 values for a 2D image,
        and 4 values for a 3D image: ``(r, c, sigma)`` or ``(f, r, c, sigma)``
        where ``(r, c)`` or ``(f, r, c)`` are coordinates of the blob and
        ``sigma`` is the standard deviation of the Gaussian kernel which
        detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[ 113.        ,  323.        ,    1.        ],
           [ 121.        ,  272.        ,   17.33333333],
           [ 124.        ,  336.        ,   11.88888889],
           [ 126.        ,   46.        ,   11.88888889],
           [ 126.        ,  208.        ,   11.88888889],
           [ 127.        ,  102.        ,   11.88888889],
           [ 128.        ,  154.        ,   11.88888889],
           [ 185.        ,  344.        ,   17.33333333],
           [ 194.        ,  213.        ,   17.33333333],
           [ 194.        ,  276.        ,   17.33333333],
           [ 197.        ,   44.        ,   11.88888889],
           [ 198.        ,  103.        ,   11.88888889],
           [ 198.        ,  155.        ,   11.88888889],
           [ 260.        ,  174.        ,   17.33333333],
           [ 263.        ,  244.        ,   17.33333333],
           [ 263.        ,  302.        ,   17.33333333],
           [ 266.        ,  115.        ,   11.88888889]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
    a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
    """
    image = img_as_float(image)

    if log_scale:
        start, stop = log(min_sigma, 10), log(max_sigma, 10)
        sigma_list = np.logspace(start, stop, num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

    # computing gaussian laplace
    # s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]

    # Replace by image_cube = np.stack(gl_images, axis=-1)
    # when we upgrade minimal requirements to NumPy 1.10
    sl = (slice(None),) * image.ndim + (np.newaxis,)
    arrays = [np.asanyarray(arr) for arr in gl_images]
    extended_arrays = [arr[sl] for arr in arrays]
    image_cube = np.concatenate(extended_arrays, axis=-1)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3,) * (image.ndim + 1)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Convert local_maxima to float64
    lm = local_maxima.astype(np.float64)
    # Convert the last index to its corresponding scale value
    lm[:, -1] = sigma_list[local_maxima[:, -1]]
    return _prune_blobs(lm, overlap)
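Usage sketch for blob_log, converting the returned sigma column to an approximate blob radius as described in the Notes above (radius ≈ sqrt(2)·sigma in 2-D). Here `image` is assumed to be a 2-D grayscale float array, and the threshold value is illustrative.

import numpy as np

blobs = blob_log(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.1)
radii = blobs[:, -1] * np.sqrt(2)   # last column is sigma; approximate radius for a 2-D image
for (r, c), radius in zip(blobs[:, :2], radii):
    print("blob at (%.0f, %.0f), radius ~ %.1f px" % (r, c, radius))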
Example No. 48
0
    def train(self):
        """Train encoder, generator and discriminator."""

        #====================================== Training ===========================================#
        #===========================================================================================#

        unet_path = os.path.join(
            self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-best.pkl' %
            (self.model_type, self.optimizer_choice, self.initial_lr,
             self.num_epochs, self.batch_size, self.down_factor))
        last_unet_path = os.path.join(
            self.current_model_saving_path, '%s-%s-%.4f-%d-%d-%d-last.pkl' %
            (self.model_type, self.optimizer_choice, self.initial_lr,
             self.num_epochs, self.batch_size, self.down_factor))
        print('The U-Net path is {}'.format(unet_path))
        # U-Net Train
        # Train loss history (R&R)
        train_loss_history = []
        # Validation loss history (R&R)
        validation_loss_history = []

        if os.path.isfile(unet_path):
            # Load the pretrained Encoder
            self.unet.load_state_dict(torch.load(unet_path))
            print('%s is Successfully Loaded from %s' %
                  (self.model_type, unet_path))

        else:
            # Train for Encoder
            best_unet_score = 0.
            print('Start training. The initial learning rate is: {}'.format(
                self.initial_lr))

            # Write the first line of the train and validation loss history csv file.
            with open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \
              encoding = 'utf-8', newline= '') as f:
                wr = csv.writer(f)
                wr.writerow([
                    'Mode', 'Current Epoch', 'Total Epoch', 'Batch Size',
                    'Metric', 'Loss'
                ])
                f.close()

            for epoch in range(self.num_epochs):
                self.unet.train(True)
                train_epoch_loss = 0
                validation_epoch_loss = 0

                length = 0
                start_time = timeit.default_timer()

                for batch, (img, GT) in enumerate(self.train_loader):
                    img = img.to(self.device)
                    GT = GT.to(self.device)

                    # Reshape the images and GTs to 4-dimensional so that they can get fed to the conv2d layer. (R&R)
                    # The new shape has to be (batch_size, num_channels, img_dim1, img_dim2).
                    if self.img_ch == 1:
                        img = img[:, np.newaxis, :, :]
                    else:
                        img = img.transpose(1, 3)
                        img = img.transpose(2, 3)

                    if self.GT_ch == 1:
                        GT = GT[:, np.newaxis, :, :]
                    else:
                        GT = GT.transpose(1, 3)
                        GT = GT.transpose(2, 3)

                    #  SR : Segmentation Result
                    SR = torch.sigmoid(self.unet(img))

                    # Flatten the prediction and target.
                    SR_flat = SR.view(SR.size(0), -1)
                    GT_flat = GT.view(GT.size(0), -1)

                    # Compute the loss for this batch.
                    if self.loss_function_name == 'Dice':
                        train_loss = self.dice_coeff_loss(SR_flat, GT_flat)
                    else:
                        train_loss = self.loss_function(SR_flat, GT_flat)

                    # Add the loss of this batch to the loss of this epoch.
                    train_epoch_loss += train_loss.item()
                    if self.edge_enhance == 'True':
                        GT_edge_enhanced = ndimage.gaussian_laplace(np.squeeze(
                            GT.cpu().detach().numpy()),
                                                                    sigma=5)
                        GT_edge1 = torch.tensor(
                            np.int64(GT_edge_enhanced < -0.001))
                        GT_edge2 = torch.tensor(
                            np.int64(GT_edge_enhanced > 0.001))
                        y_hat = torch.cat((torch.squeeze(SR_flat),
                                           torch.squeeze(SR)[GT_edge1 == 1],
                                           torch.squeeze(SR)[GT_edge2 == 1]),
                                          0)
                        y = torch.cat((torch.squeeze(GT_flat),
                                       torch.squeeze(GT)[GT_edge1 == 1],
                                       torch.squeeze(GT)[GT_edge2 == 1]), 0)
                    elif self.edge_enhance == 'Double':
                        GT_edge_enhanced = ndimage.gaussian_laplace(np.squeeze(
                            GT.cpu().detach().numpy()),
                                                                    sigma=5)
                        GT_edge1 = torch.tensor(
                            np.int64(GT_edge_enhanced < -0.001))
                        GT_edge2 = torch.tensor(
                            np.int64(GT_edge_enhanced > 0.001))
                        y_hat = torch.cat((torch.squeeze(SR_flat),
                                           torch.squeeze(SR)[GT_edge1 == 1],
                                           torch.squeeze(SR)[GT_edge1 == 1],
                                           torch.squeeze(SR)[GT_edge2 == 1]),
                                          0)
                        y = torch.cat((torch.squeeze(GT_flat),
                                       torch.squeeze(GT)[GT_edge1 == 1],
                                       torch.squeeze(GT)[GT_edge1 == 1],
                                       torch.squeeze(GT)[GT_edge2 == 1]), 0)

                    # Recompute the loss on the edge-augmented tensors only when edge
                    # enhancement is enabled; otherwise keep the loss computed above.
                    if self.edge_enhance in ('True', 'Double'):
                        train_loss = self.loss_function(y_hat, y)

                    # Backprop + optimize
                    self.reset_grad()
                    train_loss.backward()
                    self.optimizer.step()

                    ### if batch size = 1  ###
                    length += 1

                    if batch % 200 == 0:
                        print(
                            '[Training] Epoch [{}/{}], Batch: {}, Batch size: {}, Average {} Error: {}'
                            .format(epoch + 1, self.num_epochs, batch,
                                    self.batch_size, self.loss_function_name,
                                    train_epoch_loss / length))

                    # Empty cache to free up memory at the end of each batch.
                    del batch, img, GT, SR, GT_flat, SR_flat, train_loss
                    torch.cuda.empty_cache()

                end_time = timeit.default_timer()
                # Normalize the train loss over the length of the epoch (number of images in this epoch).
                train_epoch_loss = train_epoch_loss / length

                # Print the log info
                print(
                    '[Training] Epoch [%d/%d], Train Loss: %.6f, Run Time: %.4f [h]'
                    % (epoch + 1, self.num_epochs, train_epoch_loss,
                       (end_time - start_time) / 60 / 60))

                # Append train loss to train loss history (R&R)
                train_loss_history.append(train_epoch_loss)
                with open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \
                                     encoding = 'utf-8', newline= '') as f:
                    wr = csv.writer(f)
                    wr.writerow(['Training', '%d' % (epoch + 1), '%d' % (self.num_epochs), '%d' % (self.batch_size), \
                                              '%s' % self.loss_function_name, '%.6f' % train_epoch_loss])
                    f.close()

                #===================================== Validation ====================================#
                self.unet.train(False)
                self.unet.eval()

                length = 0
                start_time = timeit.default_timer()

                for batch, (img, GT) in enumerate(self.validation_loader):
                    # Read, reshape the GTs and images, and compute the target images.
                    img = img.to(self.device)
                    GT = GT.to(self.device)

                    # Reshape the images and GTs to 4-dimensional so that they can get fed to the conv2d layer. (R&R)
                    # The new shape has to be (batch_size, num_channels, img_dim1, img_dim2).
                    if self.img_ch == 1:
                        img = img[:, np.newaxis, :, :]
                    else:
                        img = img.transpose(1, 3)
                        img = img.transpose(2, 3)

                    if self.GT_ch == 1:
                        GT = GT[:, np.newaxis, :, :]
                    else:
                        GT = GT.transpose(1, 3)
                        GT = GT.transpose(2, 3)

                    #  SR : Segmentation Result
                    SR = torch.sigmoid(self.unet(img))

                    # Flatten the prediction and target.
                    SR_flat = SR.view(SR.size(0), -1)
                    GT_flat = GT.view(GT.size(0), -1)

                    # Compute the loss for this batch.
                    if self.loss_function_name == 'Dice':
                        validation_loss = self.dice_coeff_loss(
                            SR_flat, GT_flat)
                    else:
                        validation_loss = self.loss_function(SR_flat, GT_flat)
                    length += 1
                    validation_epoch_loss += validation_loss.item()

                    # Empty cache to free up memory at the end of each batch.
                    del img, GT, SR, GT_flat, SR_flat, validation_loss
                    torch.cuda.empty_cache()

                # Normalize the validation loss.
                validation_epoch_loss = validation_epoch_loss / length

                end_time = timeit.default_timer()

                # Define the decisive score of the network as 1 - validation_epoch_loss.
                unet_score = 1. - validation_epoch_loss
                print('Current learning rate: {}'.format(self.current_lr))

                print(
                    '[Validation] Epoch [%d/%d] Validation Loss: %.6f, Run Time: %.4f [h]'
                    % (epoch + 1, self.num_epochs, validation_epoch_loss,
                       (end_time - start_time) / 60 / 60))

                # Append validation loss to train loss history (R&R)
                validation_loss_history.append(validation_epoch_loss)
                with open(os.path.join(self.current_loss_history_path, 'train_and_validation_history.csv'), 'a', \
                                     encoding = 'utf-8', newline= '') as f:
                    wr = csv.writer(f)
                    wr.writerow(['Validation', '%d' % (epoch + 1), '%d' % (self.num_epochs), '%d' % (self.batch_size), \
                                              '%s' % self.loss_function_name, '%.6f' % validation_epoch_loss])
                    f.close()

                # Make sure we save the best and last unets.
                if unet_score > best_unet_score:
                    best_unet_score = unet_score
                    best_epoch = epoch
                    best_unet = self.unet.state_dict()
                    print('Best %s model score : %.6f' %
                          (self.model_type, best_unet_score))
                    torch.save(best_unet, unet_path)
                if (epoch == self.num_epochs - 1):
                    last_unet = self.unet.state_dict()
                    torch.save(last_unet, last_unet_path)
                if epoch % 10 == 0 and epoch != 0:
                    epoch_unet_path_component = unet_path.split('/')
                    file_name = epoch_unet_path_component[-1].replace(
                        'best', 'epoch%d' % epoch)
                    epoch_unet_path_component[-1] = file_name
                    epoch_unet_path = '/'.join(epoch_unet_path_component)
                    epoch_unet = self.unet.state_dict()
                    torch.save(epoch_unet, epoch_unet_path)

                # Adaptive Learning Rate (R&R)
                # previous_epoch is undefined until the handler first runs, so fall back to 0.
                try:
                    previous_epoch = self.adaptive_lr_handler(
                        3, 0.01 * self.initial_lr, epoch, previous_epoch, 0.98,
                        0.5, validation_loss_history)
                except NameError:
                    previous_epoch = self.adaptive_lr_handler(
                        3, 0.01 * self.initial_lr, epoch, 0, 0.98, 0.5,
                        validation_loss_history)

                # Early stop (R&R)
                if (self.early_stop == True):
                    if (len(validation_loss_history) > 9):
                        if (np.mean(validation_loss_history[-10:-5]) <=
                                np.mean(validation_loss_history[-5:])):
                            print(
                                'Validation loss stop decreasing. Stop training.'
                            )
                            last_unet = self.unet.state_dict()
                            torch.save(last_unet, last_unet_path)
                            break

        del self.unet
        try:
            del best_unet
            torch.cuda.empty_cache()
        except NameError:
            print(
                'Cannot delete the variable "best_unet": variable does not exist.'
            )

        return train_loss_history, validation_loss_history
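The edge_enhance branch in the training loop above finds pixels near the ground-truth boundary with a Laplacian of Gaussian of the GT mask and appends them (once or twice) to the flattened prediction and target, so the loss weights edges more heavily. Below is a minimal sketch of that idea for a single (H, W) prediction/target pair; the helper name, sigma, threshold, and the use of |LoG| instead of separate positive/negative bands are illustrative simplifications, not the trainer's exact code.

import numpy as np
import torch
from scipy import ndimage

def edge_weighted_tensors(SR, GT, sigma=5, eps=1e-3, repeats=1):
    """SR, GT: (H, W) tensors in [0, 1]. Returns flattened tensors with boundary pixels repeated."""
    log_gt = ndimage.gaussian_laplace(GT.detach().cpu().numpy(), sigma=sigma)
    edge = torch.from_numpy(np.abs(log_gt) > eps).to(SR.device)      # pixels near the GT boundary
    sr_parts = [SR.reshape(-1)] + [SR[edge]] * repeats               # repeat edge pixels to up-weight them
    gt_parts = [GT.reshape(-1)] + [GT[edge]] * repeats
    return torch.cat(sr_parts), torch.cat(gt_parts)

# loss = loss_fn(*edge_weighted_tensors(SR.squeeze(), GT.squeeze()))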