Example #1
def detect_peaks(image):
    """
    http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array

    Takes an image and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    import numpy as np
    from scipy.ndimage import maximum_filter, generate_binary_structure, binary_erosion

    # define an 8-connected neighborhood
    neighborhood = generate_binary_structure(2,2)

    #apply the local maximum filter; all pixels of maximal value
    #in their neighborhood are set to 1
    local_max = maximum_filter(image, footprint=neighborhood)==image
    background = (image==0)

    #a little technicality: we must erode the background in order to
    #successfully subtract it from local_max, otherwise a line will
    #appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background, structure=neighborhood, 
                                       border_value=1)
    # use AND NOT rather than '-': subtraction is not defined for boolean arrays
    detected_peaks = local_max & ~eroded_background
    peaks = np.array(np.where(detected_peaks)).T

    return peaks
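A quick sanity check (not from the original source; a minimal sketch assuming the function above is in scope): on a small synthetic image, only the two planted maxima should be reported.

import numpy as np

img = np.zeros((7, 7))
img[2, 2] = 3.0   # isolated peak
img[5, 5] = 1.5   # second, smaller peak
print(detect_peaks(img))  # -> [[2 2] [5 5]], one row per peak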
Example #2
def __surface_distances(input1, input2, voxelspacing=None, connectivity=1):
    """
    The distances between the surface voxel of binary objects in input1 and their
    nearest partner surface voxel of a binary object in input2.
    """
    input1 = numpy.atleast_1d(input1.astype(bool))
    input2 = numpy.atleast_1d(input2.astype(bool))
    if voxelspacing is not None:
        voxelspacing = _ni_support._normalize_sequence(voxelspacing, input1.ndim)
        voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64)
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()
            
    # binary structure
    footprint = generate_binary_structure(input1.ndim, connectivity)
            
    # extract only 1-pixel border line of objects
    input1_border = input1 ^ binary_erosion(input1, structure=footprint, iterations=1)
    input2_border = input2 ^ binary_erosion(input2, structure=footprint, iterations=1)
    
    # compute average surface distance
    # Note: scipy's distance transform is calculated only inside the borders of the
    #       foreground objects, therefore the input has to be reversed
    dt = distance_transform_edt(~input2_border, sampling=voxelspacing)
    sds = dt[input1_border]
    
    return sds    
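Illustrative follow-up (our sketch, not part of the original): the returned distance vector is typically reduced to the standard surface metrics, assuming __surface_distances and its scipy imports are in scope.

import numpy

a = numpy.zeros((32, 32, 32), dtype=bool); a[8:16, 8:16, 8:16] = True
b = numpy.zeros((32, 32, 32), dtype=bool); b[9:17, 9:17, 9:17] = True

sds = __surface_distances(a, b, voxelspacing=(1.0, 1.0, 1.0))
asd = sds.mean()  # average (directed) surface distance
hd = sds.max()    # directed Hausdorff distance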
Example #3
def find_local_maxima(arr):
    """
    Find local maxima in a multidimensional array `arr`.
    
    Parameters
    ----------
    arr : np.ndarray
        The array to find maxima in
    
    Returns
    -------
    indices : tuple of np.ndarray
        The indices of local maxima in `arr`
    """
    
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    
    # neighborhood is simply an all-True hypercube (3 cells per dimension)
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    local_max = ( filters.maximum_filter(arr, footprint=neighborhood) == arr )
    
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    background = ( arr == 0 )
    eroded_background = morphology.binary_erosion(background,
                                                  structure=neighborhood,
                                                  border_value=1)
        
    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_max = local_max ^ eroded_background # ^ = XOR
    
    return np.where(detected_max)
Example #4
def mkoutersurf(image, radius, outfile):
    #radius information is currently ignored
    #it is a little tougher to deal with the morphology in python

    fill = nib.load( image )
    filld = fill.get_data()
    filld[filld==1] = 255

    gaussian = np.ones((2,2))*.25

    image_f = np.zeros((256,256,256))

    for slice in range(256):
        temp = filld[:,:,slice]
        image_f[:,:,slice] = convolve(temp, gaussian, 'same')

    image2 = np.zeros((256,256,256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2)/2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    v, f = marching_cubes(BW2, 100)

    v2 = np.transpose(
             np.vstack( ( 128 - v[:,0],
                          v[:,2] - 128,
                          128 - v[:,1], )))
    
    write_surface(outfile, v2, f)
Example #5
def detect_local_maxima(vol):
    """
    Takes a 3D volume and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define a 26-connected neighborhood
    neighborhood = morphology.generate_binary_structure(3,3) # first is dimension, next is relative connectivity

    # apply the local maximum filter; all locations of maximum value 
    # in their neighborhood are set to 1
    local_max = (filters.maximum_filter(vol, footprint=neighborhood)==vol)

    # Remove background
    local_max[vol==0] = 0

    # Find endpoint indices (row-major order matches a nested i, j, k loop)
    x, y, z = (idx.tolist() for idx in np.nonzero(local_max))

    return x, y, z
Example #6
 def __init__(self, geometric_model='affine', tps_grid_size=3, tps_reg_factor=0, h_matches=15, w_matches=15, use_conv_filter=False, dilation_filter=None, use_cuda=True, normalize_inlier_count=False, offset_factor=227/210):
     super(WeakInlierCount, self).__init__()
     self.normalize=normalize_inlier_count
     self.geometric_model = geometric_model
     self.geometricTnf = GeometricTnf(geometric_model=geometric_model,
                                      tps_grid_size=tps_grid_size,
                                      tps_reg_factor=tps_reg_factor,
                                      out_h=h_matches, out_w=w_matches,
                                      offset_factor = offset_factor,
                                      use_cuda=use_cuda)
     # define dilation filter
     if dilation_filter is None:
         dilation_filter = generate_binary_structure(2, 2)
     # define identity mask tensor (w,h are switched and will be permuted back later)
     mask_id = np.zeros((w_matches,h_matches,w_matches*h_matches))
     idx_list = list(range(0, mask_id.size, mask_id.shape[2]+1))
     mask_id.reshape((-1))[idx_list]=1
     mask_id = mask_id.swapaxes(0,1)
     # perform 2D dilation to each channel 
     if not use_conv_filter:
         if not (isinstance(dilation_filter,int) and dilation_filter==0):
             for i in range(mask_id.shape[2]):
                 mask_id[:,:,i] = binary_dilation(mask_id[:,:,i],structure=dilation_filter).astype(mask_id.dtype)
     else:
         for i in range(mask_id.shape[2]):
             flt=np.array([[1/16, 1/8, 1/16],
                           [1/8, 1/4, 1/8],
                           [1/16, 1/8, 1/16]])
             mask_id[:,:,i] = scipy.signal.convolve2d(mask_id[:,:,i], flt, mode='same', boundary='fill', fillvalue=0)
         
     # convert to PyTorch variable
     mask_id = Variable(torch.FloatTensor(mask_id).transpose(1,2).transpose(0,1).unsqueeze(0),requires_grad=False)
     self.mask_id = mask_id
     if use_cuda:
         self.mask_id = self.mask_id.cuda()
Example #7
def getSaddlePoints(matrix, gaussian_filter_sigma=0., low=None, high=None):
    if low is None:
        low = matrix.min()
    if high is None:
        high = matrix.max()
    matrix = expandMatrix(matrix)
    neighborhood = morphology.generate_binary_structure(len(matrix.shape),2)
    # apply the local minimum filter; all locations of minimum value
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
    matrix = filters.minimum_filter(matrix, footprint=neighborhood)
    matrix = condenseMatrix(matrix)
    outPath, clusterPathMat, grad = minPath(matrix)
    flood = numpy.asarray(outPath)
    potential = []
    for e in flood:
        i,j = e
        potential.append(matrix[i,j])
    potential = numpy.asarray(potential)
    potential = scipy.ndimage.filters.gaussian_filter(potential, gaussian_filter_sigma)
    # materialize the zips: numpy.array cannot consume a lazy zip in Python 3
    derivative = lambda x: numpy.array(list(zip(-x, x[1:]))).sum(axis=1)
    signproduct = lambda x: numpy.array(list(zip(x, x[1:]))).prod(axis=1)
    potential_prime = derivative(potential)
    signproducts = numpy.sign(signproduct(potential_prime))
    extrema = flood[2:][numpy.where(signproducts<0)[0],:]
    bassinlimits = derivative(signproducts)
    saddlePoints = numpy.asarray(outPath[3:])[bassinlimits==-2]
    saddlePointValues = numpy.asarray([matrix[x[0], x[1]] for x in saddlePoints])
    saddlePoints = saddlePoints[numpy.logical_and(saddlePointValues>=low, saddlePointValues<=high),:]
    return saddlePoints
Example #8
def plotPeaks(arr2D, amp_min=DEFAULT_AMP_MIN):
	# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
	struct = generate_binary_structure(2, 1)
	neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

	# find local maxima using our filter shape
	local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
	background = (arr2D == 0)
	eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

	# Boolean mask of arr2D with True at peaks
	detected_peaks = local_max & ~eroded_background

	# extract peaks
	amps = arr2D[detected_peaks]
	j, i = np.where(detected_peaks)

	# filter peaks
	amps = amps.flatten()
	peaks = zip(i, j, amps)
	peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

	# get indices for frequency and time
	frequency_idx = [x[1] for x in peaks_filtered]
	time_idx = [x[0] for x in peaks_filtered]

	# scatter of the peaks
	fig, ax = plt.subplots()
	ax.imshow(arr2D)
	ax.scatter(time_idx, frequency_idx)
	ax.set_xlabel('Time')
	ax.set_ylabel('Frequency')
	ax.set_title("Spectrogram")
	plt.gca().invert_yaxis()
	plt.show()
Example #9
def get2DPeaks(arr2D):
    """
        Generates peaks of a spectrogram.
        Args:
            arr2D: spectrogram.
        Returns:
            List of pairs (time, frequency) of peaks.
    """

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return zip(frequency_idx, time_idx)
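A hypothetical call (the values for the module-level constants PEAK_NEIGHBORHOOD_SIZE and AMP_MIN are illustrative, and the numpy/scipy.ndimage imports the function relies on are assumed to be in scope):

PEAK_NEIGHBORHOOD_SIZE = 20
AMP_MIN = 10

spec = np.zeros((512, 256))
spec[100, 50] = 20.0   # planted peak above AMP_MIN
spec[300, 200] = 15.0
print(list(get2DPeaks(spec)))  # -> [(100, 50), (300, 200)]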
Example #10
def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
    """
    The distances between the surface voxel of binary objects in result and their
    nearest partner surface voxel of a binary object in reference.
    """
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    if voxelspacing is not None:
        voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
        voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64)
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()
            
    # binary structure
    footprint = generate_binary_structure(result.ndim, connectivity)
    
    # test for emptiness
    if 0 == numpy.count_nonzero(result): 
        raise RuntimeError('The first supplied array does not contain any binary object.')
    if 0 == numpy.count_nonzero(reference): 
        raise RuntimeError('The second supplied array does not contain any binary object.')    
            
    # extract only 1-pixel border line of objects
    result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
    reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)

    # compute average surface distance
    # Note: scipy's distance transform is calculated only inside the borders of the
    #       foreground objects, therefore the input has to be reversed
    dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
    sds = dt[result_border]
    
    return sds
Example #11
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

    background = (arr2D == 0)

    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return zip(frequency_idx, time_idx)
Example #12
    def erodeDilate(self, img, method=None):
        """
        Use morphological operators to erode or dilate the ridge structure.
        Dilate uses a recursive call to first dilate then erode.  Dilation
        alone produces ridge structures that are too thick to look
        authentic. Recursive call introduces random spurious minutiae when
        some valley structures are bridged.
        """
        img = np.array(img)
        if not method:
            method = random.choice(('erode', 'dilate', 'none'))
        inkIndex = np.where(img < 250)
        imgBin = np.zeros(np.shape(img))
        imgBin[inkIndex] = 1
        
        strel = morphology.generate_binary_structure(2,2)
        if method == 'erode':
            imgBin = morphology.binary_erosion(imgBin, strel)
        elif method == 'dilate':
            imgBin = morphology.binary_dilation(imgBin, strel)
        else:
            return img

        inkIndex = np.where(imgBin == 1)
        returnImg = 255*np.ones(np.shape(img))
        returnImg[inkIndex] = 0
        
        # Recursive call to erode after dilation to give more authentic
        # appearance.  Erode after dilate introduces spurious minutiae
        # but does not make the ridge structure too thick
        if method == 'dilate':
            returnImg = self.erodeDilate(returnImg, method='erode')
        
        return returnImg
Example #13
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = arr2D == 0
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    detected_peaks = local_max & ~eroded_background

    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return zip(frequency_idx, time_idx)
Example #14
    def __init__(self, image, perc):
        threshold = np.percentile(image.ravel(), perc)
        a = image.copy()
        # Keep only tail of image values distribution with signal
        a[a < threshold] = 0
        s = generate_binary_structure(2, 2)
        # Label image
        labeled_array, num_features = label(a, structure=s)
        # Find objects
        objects = find_objects(labeled_array)
        # Container of object's properties
        _objects = np.empty(num_features, dtype=[('label', 'int'),
                                                 ('dx', '<f8'),
                                                 ('dy', '<f8'),
                                                 ('max_pos', 'int',
                                                  (2,))])

        labels = np.arange(num_features) + 1
        dx = [int(obj[1].stop - obj[1].start) for obj in objects]
        dy = [int(obj[0].stop - obj[0].start) for obj in objects]

        # Filling objects structured array
        _objects['label'] = labels
        _objects['dx'] = dx
        _objects['dy'] = dy
        self.objects = _objects
        self._classify(image, labeled_array)
        # Fetch positions of only successfully classified objects
        self.max_pos = self._find_positions(image, labeled_array)
        self._sort()
Example #15
def detect_peaks(image):
 """
 from: http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
 Takes an image and detects the peaks using the local maximum filter.
 Returns a boolean mask of the peaks (i.e. 1 when
 the pixel's value is the neighborhood maximum, 0 otherwise)
 """

 # define an 8-connected neighborhood
 neighborhood = generate_binary_structure(2,2)

 #apply the local maximum filter; all pixels of maximal value
 #in their neighborhood are set to 1
 local_max = maximum_filter(image, footprint=neighborhood)==image
 #local_max is a mask that contains the peaks we are
 #looking for, but also the background.
 #In order to isolate the peaks we must remove the background from the mask.

 #we create the mask of the background
 background = (image==0)

 #a little technicality: we must erode the background in order to
 #successfully subtract it from local_max, otherwise a line will
 #appear along the background border (artifact of the local maximum filter)
 eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

 #we obtain the final mask, containing only peaks,
 #by removing the background from the local_max mask
 detected_peaks = local_max & ~eroded_background

 return detected_peaks
Example #16
def snapshot_shaded_area(file_name):
    """
    Return an array of shaded cloud areas for a 3D model snapshot.
        
    Parameters
    ----------
    file_name : netCDF file name
        Data has dimensions float x(x), float y(y) and float z(z) and the field
        has been conditionally sampled for condensed area in variable int
        condensed(z, y, x).
      
    Returns
    -------
    numpy.array
        Array of shaded cloud areas in units of grid cell.    
    """
    
    # Read netCDF dataset
    data = Dataset(file_name)
    condensed = data.variables['condensed'][...]
    
    # Determine shaded area mask (logical or along vertical axis)
    shaded_mask = np.any(condensed, axis=0)
    
    # Tag clouds; consider clouds connected even if they touch diagonally
    s = morphology.generate_binary_structure(2, 2)
    clouds, n = measurements.label(shaded_mask, s)
    
    # Count number of grid cells in each cloud; remove cloud free area
    cloud_areas = np.bincount(clouds.flatten().astype(int))[1:]
    
    return cloud_areas
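Why the 8-connected structure matters here (a minimal sketch, not from the original file): with the default 4-connected structure, two cells touching only at a corner count as separate clouds; with generate_binary_structure(2, 2) they are one.

import numpy as np
from scipy.ndimage import label, generate_binary_structure

mask = np.array([[1, 0],
                 [0, 1]], dtype=bool)
_, n4 = label(mask)                                   # default 4-connectivity
_, n8 = label(mask, generate_binary_structure(2, 2))  # 8-connectivity
print(n4, n8)  # -> 2 1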
Example #17
def detect_local_minima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the troughs using the local minimum filter.
    Returns a boolean mask of the troughs (i.e. 1 when
    the pixel's value is the neighborhood minimum, 0 otherwise)
    """
    # define a connected neighborhood
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    # apply the local minimum filter; all locations of minimum value 
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
    local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
    # local_min is a mask that contains the peaks we are 
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.
    # 
    # we create the mask of the background
    background = (arr==0)
    # 
    # a little technicality: we must erode the background in order to 
    # successfully subtract it from local_min, otherwise a line will 
    # appear along the background border (artifact of the local minimum filter)
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    eroded_background = morphology.binary_erosion(
        background, structure=neighborhood, border_value=1)
    # 
    # we obtain the final mask, containing only peaks, 
    # by removing the background from the local_min mask
    detected_minima = local_min & ~eroded_background
    return np.where(detected_minima)
Example #18
def fit_GLC_grid(subjdata, z_limit=ZLIMIT):
    #thesedata=subjdata
    dims = len(subjdata[0])-1
    
    bounds = [(.00001,50)] + [(-25,25) for _ in range( dims+1 ) ]
    optargs = ( subjdata, z_limit, None, True)
    
    xopt, fopt, grid, Jout = optimize.brute(func=negloglike_reduced 
                                              , ranges = bounds
                                              , args = optargs
                                              , Ns = 5
                                              , full_output=True
                                             )
    
    from scipy.ndimage import minimum_filter, generate_binary_structure
    
    neighborhood = generate_binary_structure( dims+2, dims+2 )
    local_mins = minimum_filter( Jout, footprint=neighborhood ) == Jout
    min_coords = np.array([ g[local_mins] for g in grid ]).T
    xoptglobal = xopt
    foptglobal = fopt
    for coords in min_coords:
        xopt, fopt, iter, im, sm = optimize.fmin(func=negloglike_reduced 
                                                  , x0 = coords
                                                  , args = optargs
                                                  , full_output=True
                                                 )
        if fopt < foptglobal:
            xoptglobal = xopt
            foptglobal = fopt
    
    return xoptglobal, foptglobal
Example #19
def get_2D_peaks(array2D):
    # This function is based on the function 'get_2D_peaks()' available at the URL below.
    # https://github.com/worldveil/dejavu/blob/master/dejavu/fingerprint.py
    # Copyright (c) 2013 Will Drevo, use permitted under the terms of the open-source MIT License.

    # Create a filter to extract peaks from the image data.
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 25)

    # Find local maxima using our filter shape. These are boolean arrays.
    local_maxima = maximum_filter(array2D, footprint=neighborhood) == array2D
    background = (array2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of array2D with True at peaks.
    detected_peaks = local_maxima & ~eroded_background

    # Extract peak amplitudes and locations.
    amps = array2D[detected_peaks]
    j, i = numpy.where(detected_peaks)

    # Filter peaks for those exceeding the minimum amplitude.
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]

    # Get frequency and time at peaks.
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return (frequency_idx, time_idx)
Example #20
def incorporate_cells(binary_image):
    # invert input binary image
    inv_image = np.invert(binary_image)
    
    # matrix for binary_dilation
    struct = generate_binary_structure(2, 1)

    # do binary dilation until the colony numbers even out
    plate_bin_dil = binary_dilation(inv_image, structure=struct)
    plate_dil_labels = label(plate_bin_dil)
    labels_number = len(np.unique(plate_dil_labels))  # initial number of colonies
    new_labels_number = labels_number - 1  # starting value
    cycle_number = 0  # starting value for dilation cycles
    while True:
        cycle_number += 1
        if cycle_number >= 30:
            break  # defence against infinite cycling
        else:
            if new_labels_number >= labels_number:
                break   # further dilation is useless (in theory)
            else:
                labels_number = new_labels_number
                plate_bin_dil = binary_dilation(plate_bin_dil, structure=struct)
                plate_dil_labels = label(plate_bin_dil)
                new_labels_number = len(np.unique(plate_dil_labels))
                
    return plate_bin_dil
Example #21
def detect_local_maxima(image):
    neighborhood = generate_binary_structure(2,2)
    local_max = maximum_filter(image, footprint=neighborhood)==image
    background = (image==0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    detected_peaks = local_max & ~eroded_background
    return detected_peaks*256.0
Example #22
def detect_local_minima(arr):
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    local_min = (filters.minimum_filter(arr, footprint=neighborhood) == arr)
    background = (arr == 0)
    eroded_background = morphology.binary_erosion(
            background, structure=neighborhood, border_value=1)
    detected_minima = local_min & ~eroded_background
    return np.where(detected_minima)
Example #23
def determine_search_location_per_channel(A, d1, d2, nr, method='ellipse',
                                          min_size=3, max_size=8, dist=3,
                                          expandCore=iterate_structure(generate_binary_structure(2,1),2).astype(int)):

    IND = np.array([]).reshape((d1*d2,0))

    if isinstance(method, str): method = [method]

    for ni in range(len(np.unique(nr))):
        IND_ch = determine_search_location(A[:,comp_idx(nr, ni)], d1, d2,
                                        cm[comp_idx(nr,ni),:], method = method[ni], 
                                        min_size = 3, max_size = 8, dist = 3, 
                                        expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int))

        IND = np.hstack([IND, IND_ch])

    return IND
Example #24
def spatial_filter(skeleton,image_array,loops,iterations):
	# image_array = np.where(image_array > threshold, 1, 0)
	im = np.zeros_like(image_array)
	s = morphology.generate_binary_structure(2,2)
	for i in range(loops):
		print(i)
		grow = morphology.binary_dilation(skeleton+im, iterations = iterations, structure = s)
		im = np.where(grow !=0, image_array , 0)
	return im
Example #25
def find_peaks(file):
  #read image data
  f=pyfits.open(file)
  img=f[0].data

  #set NaN pixels (empty pixels) to zero
  img[img != img]=0.0
  img[img<0.]=0.0
  if dim==4:
    img=img[0,0,:,:] #gets rid of 3rd and 4th dimensions, since FIRST fits files have four axes but only the first two have data
    
  T=ndimage.standard_deviation(img)
  sourcelabels,num_sources=ndimage.label(img>T)
  backgroundlabels,num_background=ndimage.label(img<T)
  # define an 8-connected neighbourhood
  neighborhood = generate_binary_structure(2,2)
  fimg=img*sourcelabels
  #apply the local maximum filter; all pixel of maximal value 
  #in their neighbourhood are set to 1
  local_max=maximum_filter(fimg,footprint=neighborhood)==fimg
  #In order to isolate the peaks we must remove the background from the mask.
  #we create the mask of the background
  background=img*backgroundlabels
  #we must erode the background in order to 
  #successfully subtract it from local_max, otherwise a line will
  #appear along the background border (artifact of the local maximum filter)
  eroded_background=binary_erosion(background,structure=neighborhood,border_value=1)
  
  #we obtain the final mask, containing only peaks, 
  #by removing the background from the local_max mask
  detected_peaks=local_max&~eroded_background
  #contains some peaks not in source (background bright features), but code can remove these
  #now need to find positions of these maximum
  #label peaks
  peaklabels,num_peaks=ndimage.measurements.label(detected_peaks)
  
  #get peak positions  
  slices = ndimage.find_objects(peaklabels)
  x, y = [], []
  for dy,dx in slices:
      x_center = (dx.start + dx.stop - 1)//2
      x.append(x_center)
      y_center = (dy.start + dy.stop - 1)//2
      y.append(y_center)
      
  peak_positions=list(zip(x,y))  # materialize: it is iterated twice below
  
  #get peak values, in Jy/beam
  peak_fluxes=[]
  for coord in peak_positions:
      peak_fluxes.append(img[coord[1],coord[0]])
  peaks=zip(peak_positions,peak_fluxes)
  
  #sort by peak_fluxes. Two brightest peaks will be the first two in the list
  peaks=sorted(peaks,key=lambda l: l[1])  
  peaks.reverse()
  return peaks,img,f
Example #26
def extracts_minima_areas(arr):
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
    labels = measurements.label(local_min)[0]
    objects = measurements.find_objects(labels)
    areas_and_indices_and_bounding_boxes = []
    for idx, sl in enumerate(objects):
        areas_and_indices_and_bounding_boxes.append((len(arr[sl][labels[sl] == idx + 1]), idx + 1, sl)) # first area, then index, then bounding box
    return sorted(areas_and_indices_and_bounding_boxes), labels
Example #27
def __distinct_binary_object_correspondences(reference, result, connectivity=1):
    """
    Determines all distinct (where connectivity is defined by the connectivity parameter
    passed to scipy's `generate_binary_structure`) binary objects in both of the input
    parameters and returns a 1to1 mapping from the labelled objects in reference to the
    corresponding (whereas a one-voxel overlap suffices for correspondence) objects in
    result.

    All of this stems from the problem that the relationship is a non-surjective many-to-many mapping.

    @return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
    """
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))

    # binary structure
    footprint = generate_binary_structure(result.ndim, connectivity)

    # label distinct binary objects
    labelmap1, n_obj_result = label(result, footprint)
    labelmap2, n_obj_reference = label(reference, footprint)

    # find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-two-many for later processing
    slicers = find_objects(labelmap2)  # get windows of labelled objects
    mapping = dict()  # mappings from labels in labelmap2 to corresponding object labels in labelmap1
    used_labels = set()  # set to collect all already used labels from labelmap2
    one_to_many = list()  # list to collect all one-to-many mappings
    for l1id, slicer in enumerate(slicers):  # iterate over objects in labelmap2 and their windows
        l1id += 1  # labelled objects have ids starting from 1
        bobj = (l1id) == labelmap2[slicer]  # find binary object corresponding to the label1 id in the segmentation
        l2ids = np.unique(labelmap1[slicer][bobj])  # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
        l2ids = l2ids[0 != l2ids]  # remove background identifiers (=0)
        if 1 == len(l2ids):  # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
            l2id = l2ids[0]
            if l2id not in used_labels:
                mapping[l1id] = l2id
                used_labels.add(l2id)
        elif 1 < len(l2ids):  # one-to-many mapping: store relationship for later processing
            one_to_many.append((l1id, set(l2ids)))

    # process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
    while True:
        one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in
                       one_to_many]  # remove already used ids from all sets
        one_to_many = [x for x in one_to_many if x[1]]  # remove empty sets
        one_to_many = sorted(one_to_many, key=lambda x: len(x[1]))  # sort by set length
        if 0 == len(one_to_many):
            break
        l2id = one_to_many[0][1].pop()  # select an arbitrary target label id from the shortest set
        mapping[one_to_many[0][0]] = l2id  # add to one-to-one mappings
        used_labels.add(l2id)  # mark target label as used
        one_to_many = one_to_many[1:]  # delete the processed set from all sets

    return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping
Example #28
def detect_peaks( image ):
    """
    Detect the local maxima in a 2d matrix. Returns a 2d matrix where maxima are
    coded as 1s and the rest of the cells are 0s.
    """
    neighborhood = generate_binary_structure( 2, 2 )
    local_max = maximum_filter( image, footprint = neighborhood ) == image
    background = ( image == 0 )
    eroded_background = binary_erosion( background, structure = neighborhood, border_value = 1 )
    detected_peaks = local_max & ~eroded_background
    return detected_peaks
Example #29
 def detect_peaks(self, image):
     """
     Takes an image and detects the peaks using the local maximum filter.
     Returns a boolean mask of the peaks (i.e. 1 when
     the pixel's value is the neighborhood maximum, 0 otherwise)
     """
     neighborhood = generate_binary_structure(2,2)
     local_max = maximum_filter(image, 5)==image
     background = (image==0)
     eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
     detected_peaks = local_max & ~eroded_background
     return detected_peaks
Example #30
def detect_local_maxima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    local_max = (filters.maximum_filter(arr, footprint=neighborhood) == arr)
    background = (arr == 0)
    eroded_background = morphology.binary_erosion(background, structure=neighborhood, border_value=1)
    detected_maxima = local_max & ~eroded_background
    return np.where(detected_maxima)
Example #31
def __surface_distances(input1, input2, voxelspacing=None, connectivity=1):
    """
    The distances between the surface voxel of binary objects in input1 and their
    nearest partner surface voxel of a binary object in input2.
    """
    input1 = np.atleast_1d(input1.astype(bool))
    input2 = np.atleast_1d(input2.astype(bool))
    if voxelspacing is not None:
        voxelspacing = _ni_support._normalize_sequence(voxelspacing,
                                                       input1.ndim)
        voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()

    # binary structure
    footprint = generate_binary_structure(input1.ndim, connectivity)

    # test for emptiness
    # if 0 == np.count_nonzero(input1):
    #     raise RuntimeError('The first supplied array does not contain any binary object.')
    # if 0 == np.count_nonzero(input2):
    #     raise RuntimeError('The second supplied array does not contain any binary object.')

    if np.count_nonzero(input1) == 0 or np.count_nonzero(input2) == 0:
        return np.max(np.shape(input1))
    # extract only 1-pixel border line of objects
    input1_border = input1 ^ binary_erosion(
        input1, structure=footprint, iterations=1)
    input2_border = input2 ^ binary_erosion(
        input2, structure=footprint, iterations=1)

    # compute average surface distance
    # Note: scipy's distance transform is calculated only inside the borders of the
    #       foreground objects, therefore the input has to be reversed
    dt = distance_transform_edt(~input2_border, sampling=voxelspacing)
    sds = dt[input1_border]

    return sds
Example #32
def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
    """
    The distances between the surface voxel of binary objects in result and their
    nearest partner surface voxel of a binary object in reference.
    """
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))
    if voxelspacing is not None:
        voxelspacing = _ni_support._normalize_sequence(voxelspacing,
                                                       result.ndim)
        voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()

    # binary structure
    footprint = generate_binary_structure(result.ndim, connectivity)

    # test for emptiness
    if 0 == np.count_nonzero(result):
        raise RuntimeError(
            'The first supplied array does not contain any binary object.')
    if 0 == np.count_nonzero(reference):
        raise RuntimeError(
            'The second supplied array does not contain any binary object.')

    # extract only 1-pixel border line of objects
    result_border = result ^ binary_erosion(
        result, structure=footprint, iterations=1)
    reference_border = reference ^ binary_erosion(
        reference, structure=footprint, iterations=1)

    # compute average surface distance
    # Note: scipy's distance transform is calculated only inside the borders of the
    #       foreground objects, therefore the input has to be reversed
    dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
    sds = dt[result_border]

    return sds
Example #33
def process_mask(mask):
    """
    对一片肺叶的3D binary mask 进行处理
    :param mask: 其中一片肺叶的3Dmask,和CT的尺寸一致 ndarray (D,H,W) np.bool
    :return: dilatedMask 尺寸与输入一致
    """
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1 = np.ascontiguousarray(mask[i_layer])
        # the slice is not pure background
        if np.sum(mask1) > 0:
            # compute the smallest convex region that fully encloses the foreground of mask1
            mask2 = convex_hull_image(mask1)
            # if the hull (factor 2 here; 1.5 would also work) expands the region
            # too severely, drop the convex_hull_image result
            if np.sum(mask2) > 2 * np.sum(mask1):
                mask2 = mask1
        # pure background
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
    struct = generate_binary_structure(3, 1)
    dilatedMask = binary_dilation(convex_mask, structure=struct, iterations=10)
    return dilatedMask
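An illustrative call (our sketch; assumes skimage.morphology.convex_hull_image and the scipy.ndimage imports the function uses are available):

import numpy as np

lung = np.zeros((4, 64, 64), dtype=bool)
lung[:, 20:40, 20:40] = True
dilated = process_mask(lung)
print(dilated.shape, dilated.sum() > lung.sum())  # same shape, strictly larger mask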
Example #34
def spec_to_peaks(data,
                  value,
                  fp=iterate_structure(
                      generate_binary_structure(rank=2, connectivity=2), 10)):
    """Gets the data and the cutoff and return the true and false values of the max

    Parameters
    ----------
        data: numpy.ndarray, shape = (M, N)
            A 2-D array of the spectrum data
        value: number
            Cutoff value
        fp: numpy.ndarray,  boolean [Optional]
            A 2-D array Fingerprint for the surrounding
    Returns
    -------
        isPeaks: numpy.ndarray, shape = (M, N)
            A 2-D array of true/false values of the data where peaks are located

    """

    max_arr = maximum_filter(data, footprint=fp)
    return (data == max_arr) & (data > value)
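Minimal usage sketch (assumes numpy as np and the scipy.ndimage imports above are in scope): a single planted peak above the cutoff is the only True cell in the returned mask.

import numpy as np

data = np.zeros((64, 64))
data[10, 10] = 5.0
print(np.argwhere(spec_to_peaks(data, value=1.0)))  # -> [[10 10]]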
Example #35
def detect_largest_umap_areas_slice(slice_u_map, structure):

    binary_map = np.zeros(slice_u_map.shape).astype(bool)
    mask = slice_u_map != 0
    binary_map[mask] = True
    binary_structure = generate_binary_structure(binary_map.ndim, connectivity=2)
    bin_labels, num_of_objects = scipy_label(binary_map, binary_structure)
    blob_area_sizes = []
    blob_after_erosion_sizes = []
    if num_of_objects >= 1:
        for i in np.arange(1, num_of_objects + 1):
            binary_map = np.zeros(bin_labels.shape).astype(bool)
            binary_map[bin_labels == i] = 1
            blob_area_sizes.append(np.count_nonzero(binary_map))
            remaining_blob = binary_erosion(binary_map, structure)
            blob_after_erosion_sizes.append(np.count_nonzero(remaining_blob))
        blob_area_sizes = np.array(blob_area_sizes)
        blob_area_sizes[::-1].sort()
        # print(blob_after_erosion_sizes)
        blob_after_erosion_sizes = np.array(blob_after_erosion_sizes)
        blob_after_erosion_sizes[::-1].sort()

    return blob_area_sizes, blob_after_erosion_sizes
Example #36
def mkoutersurf(image, radius, outfile):
    #radius information is currently ignored
    #it is a little tougher to deal with the morphology in python

    fill = nib.load(image)
    filld = fill.get_data()
    filld[filld == 1] = 255

    gaussian = np.ones((2, 2)) * .25

    image_f = np.zeros((256, 256, 256))

    for slice in range(256):
        temp = filld[:, :, slice]
        image_f[:, :, slice] = convolve(temp, gaussian, 'same')

    image2 = np.zeros((256, 256, 256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2) / 2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    v, f, _, _ = measure.marching_cubes_lewiner(BW2, 100)

    v2 = np.transpose(
        np.vstack((
            128 - v[:, 0],
            v[:, 2] - 128,
            128 - v[:, 1],
        )))

    write_surface(outfile, v2, f)
Example #37
def remove_parked_lows(lows):
    """ID_parked_lows
 
    Returns the locations of low pressure systems
    which stay in one location over the course of many days.

    Parameters:
    -------------------
    lows: a binary array where '1' is the location of a low
    lat: an array of latitudes for the lows grid
    lon: an array of longitudes for the lows grid
    """
    """
    for t in times:
       buffer around each low
    find buffers that overlap
    delete these for now; just include moving storms
    """
    lows_copy = np.copy(lows)
    time = lows.shape[0]
    for day in range(1,time):
        bi = morphology.generate_binary_structure(2,2)
        buffered_lows_new = morphology.binary_dilation(lows[day,:,:],structure = bi,iterations = 1).astype(int) 
        buffered_lows_old = morphology.binary_dilation(lows[day-1,:,:],structure = bi,iterations = 1).astype(int)
        storms, _ = ndimage.label(buffered_lows_new)
        # id which storms are overlapping
        lowsum = buffered_lows_new + buffered_lows_old   
        k = np.where(lowsum == 2)
        label_list = np.unique(storms[k])
        # delete those storms
        l = np.select([lows[day,:,:] == 1],[storms])
        lnew = np.in1d(l,label_list).astype(int)
        lnew = np.reshape(lnew,(lows.shape[1],lows.shape[2]))
        l[lnew == 1] = 0
        l[l > 0] = 1
        lows_copy[day,:,:] = l
    return lows_copy
Example #38
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    # scatter of the peaks
    if plot:
      fig, ax = plt.subplots()
      ax.imshow(arr2D)
      ax.scatter(time_idx, frequency_idx)
      ax.set_xlabel('Time')
      ax.set_ylabel('Frequency')
      ax.set_title("Spectrogram")
      plt.gca().invert_yaxis()
      plt.show()

    return zip(frequency_idx, time_idx)
Example #39
def detect_local_maxima(arr, toricMap=False):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define a connected neighborhood
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
    if toricMap:
        X,Y = arr.shape
        arr = expandMatrix(arr)
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    # apply the local maximum filter; all locations of maximum value
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#maximum_filter
    local_max = (filters.maximum_filter(arr, footprint=neighborhood)==arr)
    # local_max is a mask that contains the peaks we are
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.
    #
    # we create the mask of the background
    background = (arr==0)
    #
    # a little technicality: we must erode the background in order to
    # successfully subtract it from local_max, otherwise a line will
    # appear along the background border (artifact of the local maximum filter)
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    eroded_background = morphology.binary_erosion(
        background, structure=neighborhood, border_value=1)
    #
    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_maxima = local_max & ~eroded_background
    if toricMap:
        detected_maxima = detected_maxima[X:2*X,Y:2*Y]
    return numpy.where(detected_maxima)
Example #40
def detect_peaks(image):
    """
    Takes an image and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """

    # define an 8-connected neighborhood
    neighborhood = generate_binary_structure(2, 2)

    # apply the local maximum filter; all pixels of maximal value
    # in their neighborhood are set to 1
    local_max = maximum_filter(image, footprint=neighborhood) == image
    # local_max is a mask that contains the peaks we are
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.

    # we create the mask of the background
    background = (image == 0)

    # a little technicality: we must erode the background in order to
    # successfully subtract it from local_max, otherwise a line will
    # appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask (xor operation)
    det_peaks = local_max ^ eroded_background
    p = int(np.count_nonzero(det_peaks))
    return p, det_peaks
Example #41
def surface_distance(result, reference, connectivity=1):
    result = result.astype(bool)
    reference = reference.astype(bool)

    # Create binary structure
    struct = generate_binary_structure(result.ndim, connectivity)

    # Test for empty images
    if np.count_nonzero(result) == 0:
        raise RuntimeError(
            'The result image does not contain any binary object.')
    if np.count_nonzero(reference) == 0:
        raise RuntimeError(
            'The reference image does not contain any binary object.')

    # Extract border images
    result_border = result ^ binary_erosion(result, structure=struct)
    reference_border = reference ^ binary_erosion(reference, structure=struct)

    # Compute average surface distance
    dt = distance_transform_edt(~reference_border)
    sds = dt[result_border]

    return sds
Example #42
def spectrogram_to_peaks(spectrogram, cutoff, iterations=3):
    """ Given a spectrogram, return its peaks, which are all local peaks
        with values greater than the cutoff. 
        
        Parameters
        ----------
        spectrogram : numpy.ndarray, shape = (N, M)
            2D array of shape (N, M)
            
        cutoff : float
            elements less than or equal to the cutoff are considered 
            background elements
            
        iterations : int
            the number of iterations for the iterated footprint
            
        Returns
        -------
        A bool array of the peaks of a spectrogram. """

    fp = generate_binary_structure(rank=2, connectivity=1)
    spec_max = maximum_filter(spectrogram,
                              footprint=iterate_structure(fp, iterations))
    return (spectrogram == spec_max) & (spectrogram > cutoff)
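A note on the footprint (a sketch matching the call above): iterate_structure dilates the 4-connected structure with itself, so the footprint diamond grows by two cells per axis with each iteration.

from scipy.ndimage import generate_binary_structure, iterate_structure

fp = iterate_structure(generate_binary_structure(2, 1), 3)
print(fp.shape)  # -> (7, 7): the default iterations=3 gives a 7x7 diamond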
Example #43
 def get_2D_peaks(self, plot=False):
     struct = generate_binary_structure(2, 1)
     neighborhood = iterate_structure(struct, 20)
     local_max = maximum_filter(self.spectrogram,
                                footprint=neighborhood) == self.spectrogram
     background = (self.spectrogram == 0)
     eroded_background = binary_erosion(background,
                                        structure=neighborhood,
                                        border_value=1)
     detected_peaks = local_max ^ eroded_background
     # extract peaks
     amps = self.spectrogram[detected_peaks]
     j, i = np.where(detected_peaks)
     # filter peaks
     amps = amps.flatten()
     peaks = zip(i, j, amps)
     peaks_filtered = filter(lambda x: x[2] > 10, peaks)  # freq, time, amp
     # get indices for frequency and time
     frequency_idx = []
     time_idx = []
     for x in peaks_filtered:
         frequency_idx.append(x[1])
         time_idx.append(x[0])
     return zip(frequency_idx, time_idx)
Example #44
def detect_peaks(image):
    """
    Takes an image and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)

    Obtained from http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
    """
    from scipy.ndimage import (maximum_filter, generate_binary_structure,
                               binary_erosion)
    # define an 8-connected neighborhood
    neighborhood = generate_binary_structure(2, 2)

    # apply the local maximum filter; all pixels of maximal value
    # in their neighborhood are set to 1
    local_max = maximum_filter(image, footprint=neighborhood) == image
    # local_max is a mask that contains the peaks we are
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask

    # we create the mask of the background
    background = (image == 0)

    # a little technicality: we must erode the background in order to
    # successfully subtract it from local_max, otherwise a line will
    # appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_peaks = local_max & ~eroded_background

    return detected_peaks
Example #45
def find_peaks(heatmap):
    """ We apply the "mask" method similar to github/erdogant/findpeaks
    directly"""
    # set threshold
    threshold = 0
    # create the footprint
    # a neighborhood of a pixel connected to its 8 neighbors
    neighborhood = generate_binary_structure(2, 2)

    # apply local maximum filter mask, max pixels in neighborhood set to 1
    local_max = maximum_filter(heatmap, footprint=neighborhood) == heatmap

    # Create mask of the background
    background = (heatmap <= threshold)

    # Erode background to prevent artifact when subtracting it from local max
    eroded = binary_erosion(background, structure=neighborhood, border_value=1)

    # XOR to remove background from the local max mask
    detected_peaks = local_max ^ eroded

    # Return obtain final mask of only peaks
    # Shape of heatmap but peaks are 1s & else is 0s
    return detected_peaks
Example #46
    def surfd(self,input1, input2, sampling=1, connectivity=1):
        '''
        function to compute the surface distance
        input params:
            input1: predicted segmentation mask
            input2: ground truth mask
            sampling: default value
            connectivity: default value
        returns:
            sds : surface distance
        '''
        input_1 = np.atleast_1d(input1.astype(bool))
        input_2 = np.atleast_1d(input2.astype(bool))
        conn = morphology.generate_binary_structure(input_1.ndim, connectivity)

        #binary erosion on input1
        y=morphology.binary_erosion(input_1, conn)
        y=y.astype(np.float32)
        x=input_1.astype(np.float32)
        S=x-y

        #binary erosion on input2
        y=morphology.binary_erosion(input_2, conn)
        y=y.astype(np.float32)
        x=input_2.astype(np.float32)
        Sprime=x-y

        S=S.astype(bool)
        Sprime=Sprime.astype(bool)

        dta = morphology.distance_transform_edt(~S,sampling)
        dtb = morphology.distance_transform_edt(~Sprime,sampling)

        sds = np.concatenate([np.ravel(dta[Sprime!=0]), np.ravel(dtb[S!=0])])

        return sds
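The symmetric distance vector is usually reduced to summary metrics; a hypothetical follow-up from inside the owning class (the mask and metric names here are ours, not from the original):

sds = self.surfd(pred_mask, gt_mask, sampling=[1.0, 1.0, 1.0])
msd = sds.mean()                  # mean (symmetric) surface distance
rms = np.sqrt((sds ** 2).mean())  # residual mean-square distance
hd = sds.max()                    # symmetric Hausdorff distance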
Example #47
def local_maxima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define a connected neighborhood
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    # apply the local maximum filter; all locations of maximal value in their neighborhood are set to 1
    # local_max is a mask that contains the peaks we are looking for, but also the background. In order to isolate
    # the peaks we must remove the background from the mask.
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#maximum_filter
    local_max = (filters.maximum_filter(arr, footprint=neighborhood) == arr)

    # we create the mask of the background
    # mxu: in the original version, was background = (arr==0)
    background = (arr == arr.min())

    # a little technicality: we must erode the background in order to
    # successfully subtract it from local_max, otherwise a line will
    # appear along the background border (artifact of the local maximum filter)
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    eroded_background = morphology.binary_erosion(background,
                                                  structure=neighborhood,
                                                  border_value=1)

    # # we obtain the final mask, containing only peaks, by removing the background from the local_max mask
    # # mxu: this is the old version, but the boolean minus operator is deprecated
    # detected_maxima = local_max - eroded_background

    # Material nonimplication, see http://en.wikipedia.org/wiki/Material_nonimplication
    detected_maxima = np.bitwise_and(local_max,
                                     np.bitwise_not(eroded_background))
    return np.where(detected_maxima)
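A quick illustration of why nonimplication is used rather than the deprecated boolean subtraction (made-up arrays):

import numpy as np
a = np.array([True, True, False, False])    # stand-in for local_max
b = np.array([True, False, True, False])    # stand-in for eroded_background
print(np.bitwise_and(a, np.bitwise_not(b)))  # [False  True False False]
# a - b raises a TypeError on modern NumPy; a ^ b differs from nonimplication
# wherever b is True but a is False. In the function above the eroded
# background is always a subset of local_max (a constant neighborhood is
# trivially its own maximum), so XOR would happen to give the same result.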
Beispiel #48
0
def get2DPeaks(arr2D):
    """
        Generates peaks of a spectrogram.
        Args:
            arr2D: spectrogram.
        Returns:
            List of pairs (time, frequency) of peaks.
    """

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return zip(frequency_idx, time_idx)
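A minimal sketch of calling get2DPeaks; the module-level constants it relies on are assumed here, not taken from the original:

import numpy as np
from scipy.ndimage import (generate_binary_structure, iterate_structure,
                           maximum_filter, binary_erosion)

PEAK_NEIGHBORHOOD_SIZE = 20   # assumed value
AMP_MIN = 10                  # assumed value

arr2D = np.abs(np.random.randn(256, 512)) * 20   # stand-in spectrogram
peaks = list(get2DPeaks(arr2D))   # zip() is lazy in Python 3; materialize it
print(len(peaks))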
Beispiel #49
0
def bwperim(BW, dim=3, conn=6):
    # generate_binary_structure and matlab bwperim interpret conn differently
    # Here we stay consistent with the Matlab convention
    # vol = np.bool(vol)
    BW = BW > 0
    if dim == 2:
        if conn in [4, 8]:
            if conn == 4:
                conn = 1
            else:
                conn = 2
    elif dim == 3:
        if conn in [6, 18, 26]:
            if conn == 6:
                conn = 1
            elif conn == 18:
                conn = 2
            else:
                conn = 3
    # print(dim)
    conn_structure = generate_binary_structure(dim, conn)
    BWErode = binary_erosion(BW, structure=conn_structure)
    BWPerim = BW ^ BWErode
    return BWPerim
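A small sketch of the conn mapping in action (hypothetical input):

import numpy as np
from scipy.ndimage import generate_binary_structure, binary_erosion

BW = np.zeros((7, 7), dtype=bool)
BW[2:5, 2:5] = True                       # a solid 3x3 block
print(bwperim(BW, dim=2, conn=4).sum())   # 8: only the interior pixel is eroded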
Beispiel #50
0
    def detect_peaks(self, image):
        """
        Takes an image and detects the peaks using the local maximum filter.
        Returns a boolean mask of the peaks (i.e. 1 when
        the pixel's value is the neighborhood maximum, 0 otherwise)

        :param image: A 2D input image
        :return: Binary output image of the same size as the input, with pixel
        value 1 indicating that there is a peak at that point
        """

        # define an 8-connected neighborhood
        neighborhood = generate_binary_structure(2, 2)

        # apply the local maximum filter; all pixel of maximal value
        # in their neighborhood are set to 1
        local_max = maximum_filter(image, footprint=neighborhood) == image
        # local_max is a mask that contains the peaks we are
        # looking for, but also the background.
        # In order to isolate the peaks we must remove the background from the mask.

        # we create the mask of the background
        background = (image == 0)

        # a little technicality: we must erode the background in order to
        # successfully subtract it from local_max, otherwise a line will
        # appear along the background border (artifact of the local maximum filter)
        eroded_background = binary_erosion(background,
                                           structure=neighborhood,
                                           border_value=1)

        # we obtain the final mask, containing only peaks,
        # by removing the background from the local_max mask (xor operation)
        detected_peaks = local_max ^ eroded_background

        return detected_peaks
Beispiel #51
0
def process_mask(mask):
    """
    Preprocessing of the mask annotation.
    :param mask: binary mask volume, one slice per layer
    :return: the dilated mask
    """
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        # np.ascontiguousarray returns a contiguous copy with the same shape as the input array
        mask1 = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1) > 0:  # the slice contains a mask target
            # convex_hull_image takes a binary image and returns a boolean image
            # that is True for points inside the convex hull and False otherwise
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2) > 1.5 * np.sum(
                    mask1):  # the hull is too generous and hides mask1's rough shape, so skip it
                mask2 = mask1
        else:  # no mask target in this slice
            mask2 = mask1
        convex_mask[i_layer] = mask2
    # binary dilation
    struct = generate_binary_structure(3, 1)
    dilatedMask = binary_dilation(convex_mask, structure=struct, iterations=10)
    # return the dilated mask
    return dilatedMask
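A minimal, hypothetical call, assuming the snippet's imports (convex_hull_image from skimage.morphology, binary_dilation and generate_binary_structure from scipy.ndimage) are in scope:

import numpy as np
mask = np.zeros((3, 32, 32), dtype=np.uint8)
mask[1, 10:20, 12:18] = 1            # one annotated slice
dilated = process_mask(mask)
print(dilated.shape, dilated.sum())  # same shape, larger support after 10 dilation steps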
Beispiel #52
0
    def _remove_outside_body(self, img_patch):
        # Flood-fill every zero region that touches the patch border (the area
        # outside the body), leaving only holes inside the body as zeros.

        h, w = img_patch.shape

        se = generate_binary_structure(2, 2)
        img_patch = binary_dilation(img_patch, structure=se).astype('uint8')

        for c in [0, w - 1]:
            x = np.where(img_patch[:, c] == 0)[0]
            while(len(x)):
                mask = region_growing([[x[0], c]], img_patch == 0)
                img_patch += mask
                x = np.where(img_patch[:, c] == 0)[0]

        for r in [0, h - 1]:
            x = np.where(img_patch[r, :] == 0)[0]
            while(len(x)):
                mask = region_growing([[r, x[0]]], img_patch == 0)
                img_patch += mask
                x = np.where(img_patch[r, :] == 0)[0]

        img_patch = binary_erosion(img_patch, structure=se).astype('uint8')

        return img_patch
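region_growing is not defined in this snippet; a minimal sketch, under the assumption that it takes a list of [row, col] seeds plus a boolean mask of growable pixels and returns a uint8 mask of the connected regions containing the seeds:

import numpy as np
from scipy.ndimage import label

def region_growing(seeds, free):
    # Hypothetical helper assumed by _remove_outside_body: return, as a
    # uint8 mask, the connected components of `free` that contain any seed.
    labeled, _ = label(free)
    grown = np.zeros(free.shape, dtype='uint8')
    for r, c in seeds:
        if labeled[r, c]:
            grown[labeled == labeled[r, c]] = 1
    return grown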
Beispiel #53
0

# 2D Peaks (peaks in spectrogram)

# Import
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
                                      iterate_structure, binary_erosion)

# variables:
PEAK_NEIGHBORHOOD_SIZE = 20
cqt_db = librosa.amplitude_to_db(cqt)
axes = librosa_display.specshow(cqt_db,
                                sr=sr,
                                x_axis='time',
                                y_axis='hz',
                                cmap='plasma')

struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
# find local maxima using our filter shape
# (run the filter on the spectrogram array, not on the matplotlib object
#  returned by specshow)
local_max = maximum_filter(cqt_db, footprint=neighborhood) == cqt_db
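The snippet stops after the maximum filter; the remaining steps, following the same pattern as get2DPeaks above, would be (a sketch, not part of the original; np assumed imported):

background = (cqt_db == cqt_db.min())
eroded_background = binary_erosion(background, structure=neighborhood,
                                   border_value=1)
detected_peaks = local_max ^ eroded_background
freq_idx, time_idx = np.where(detected_peaks)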
Beispiel #54
0
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True, b_in=None,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1,
                              method_ls='lasso_lars', update_background_components=True, low_rank_background=True, block_size=1000, num_blocks_per_run=20):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters:
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).

    C: np.ndarray
        calcium activity of each neuron.

    f: np.ndarray
        temporal profile  of background activity.

    A_in: np.ndarray
        spatial profile of background activity. If A_in is boolean then it defines the spatial support of A.
        Otherwise it is used to determine it through determine_search_location

    b_in: np.ndarray
        the background can be passed as input, especially with one background per patch, since it will be updated using hals

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int

    sn: [optional] float
        noise associated with each pixel if known

    backend [optional] str
        'ipyparallel', 'single_thread'
        single_thread: no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and then sends jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread

    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0'. Nonnegative least square with L0 penalty
             'lasso_lars' lasso lars function from scikit learn
             'lasso_lars_old' lasso lars from old implementation, will be deprecated

    normalize_yyt_one: bool
        whether to normalize the C and A matrices so that diag(C*C.T) are ones

    update_background_components:bool
        whether to update the background components in the spatial phase

    low_rank_background:bool
        whether to update the background using a low rank approximation. In the False case all the nonzero elements of the background components are updated using hals
        (to be used with one background per patch)


    Returns:
    --------
    A: np.ndarray
         new estimate of spatial footprints

    b: np.ndarray
        new estimate of spatial background

    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    f: np.ndarray
        same as f_in except if empty component deleted.

    Raise:
    -------
    Exception('You need to define the input dimensions')

    Exception('Dimension of Matrix Y must be pixels x time')

    Exception('Dimension of Matrix C must be neurons x time')

    Exception('Dimension of Matrix f must be background comps x time ')

    Exception('Either A or C need to be determined')

    Exception('Dimension of Matrix A must be pixels x neurons ')

    Exception('You need to provide estimate of C and f')

    Exception('Not implemented consistently')

    Exception("Failed to delete: " + folder)
    """
    print('Initializing update of Spatial Components')

    if expandCore is None:
        expandCore = iterate_structure(
            generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    # shape transformation and tests
    Y, A_in, C, f, n_pixels_per_process, rank_f, d, T = test(
        Y, A_in, C, f, n_pixels_per_process, nb)

    start_time = time.time()
    print('computing the distance indicators')
    # we compute the indicator matrix from the distance indicator
    ind2_, nr, C, f, b_, A_in = computing_indicator(
        Y, A_in, b_in, C, f, nb, method, dims, min_size, max_size, dist, expandCore, dview)
    if normalize_yyt_one and C is not None:
        C = np.array(C)
        nr_C = np.shape(C)[0]
        d_ = scipy.sparse.lil_matrix((nr_C, nr_C))
        d_.setdiag(np.sqrt(np.sum(C ** 2, 1)))
        A_in = A_in * d_
        C = old_div(C, np.sqrt(np.sum(C ** 2, 1)[:, np.newaxis]))

    if b_in is None:
        b_in = b_

    print('memmapping')
    # we create a memory map file if it does not already exist; we send Cf, a
    # matrix that includes the background components
    C_name, Y_name, folder = creatememmap(Y, np.vstack((C, f)), dview)

    # we create a pixel group array (chunks for the cnmf) for the parallelization of the process
    print('Updating Spatial Components using lasso lars')
    cct = np.diag(C.dot(C.T))
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, i + n_pixels_per_process)), method_ls, cct, ])
    if i < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, np.prod(dims))), method_ls, cct])
    A_ = np.zeros((d, nr + np.size(f, 0)))  # init A_
    if dview is not None:
        if 'multiprocessing' in str(type(dview)):
            parallel_result = dview.map_async(
                regression_ipyparallel, pixel_groups).get(4294967)
        else:
            parallel_result = dview.map_sync(
                regression_ipyparallel, pixel_groups)
            dview.results.clear()
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))

    for chunk in parallel_result:
        for pars in chunk:
            px, idxs_, a = pars
            A_[px, idxs_] = a

    print("thresholding components")
    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method,
                              maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss)

    ff = np.where(np.sum(A_, axis=0) == 0)  # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating {} empty spatial components'.format(len(ff)))
        A_ = np.delete(A_, list(ff[ff < nr]), 1)
        C = np.delete(C, list(ff[ff < nr]), 0)
        nr = nr - len(ff[ff < nr])
        if low_rank_background:
            background_ff = list(filter(lambda i: i >= nb, ff - nr))
            f = np.delete(f, background_ff, 0)
        else:
            background_ff = list(filter(lambda i: i >= 0, ff - nr))
            f = np.delete(f, background_ff, 0)
            b_in = np.delete(b_in, background_ff, 1)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, dview=dview, block_size=block_size, num_blocks_per_run=num_blocks_per_run) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        # Y*f' - A*(C*f')
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    if update_background_components:

        if b_in is None:
            # update baseline based on residual
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
        else:
            ind_b = [np.where(_b)[0] for _b in b_in.T]
            b = HALS4shape_bckgrnd(Y_resf, b_in, f, ind_b)

    else:
        if b_in is None:
            raise Exception(
                'If you set update_background_components to False you have to pass the background as input to update_spatial')
        # try:
        #    b = np.delete(b_in, background_ff, 0)
        # except NameError:
        b = b_in

    print(("--- %s seconds ---" % (time.time() - start_time)))
    try:  # clean up
        # remove temporary file created
        print("Removing tempfiles created")
        shutil.rmtree(folder)
    except:
        raise Exception("Failed to delete: " + folder)

    return A_, b, C, f
Beispiel #55
0
def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, dist=3,
                              expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int), dview=None):
    """
    compute the indices of the distance from the cm to search for the spatial component

    does this by following an ellipse from the cm or by a step-by-step dilation around the cm


    Parameters:
    ----------
    [parsed]
     cm[i]:
        center of mass of each neuron

     A[:, i]: the spatial footprint of each component

     dims:
        the dimensions of each A (usually the same)

     dist:
        computed distance matrix

     dims: [optional] tuple
                x, y[, z] movie dimensions

    method: [optional] string
            method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
            if method is dilate this represents the kernel used for expansion

    min_size: [optional] int

    max_size: [optional] int

     dist: [optional] int

    Returns:
    --------
    dist_indicator: np.ndarray
        distance from the cm to search for the spatial footprint

    Raise:
    -------
    Exception('You cannot pass empty (all zeros) components!')
    """

    from scipy.ndimage.morphology import grey_dilation

    # we initialize the values
    if len(dims) == 2:
        d1, d2 = dims
    elif len(dims) == 3:
        d1, d2, d3 = dims
    d, nr = np.shape(A)
    A = csc_matrix(A)
    dist_indicator = scipy.sparse.csc_matrix((d, nr),dtype= np.float32)

    if method == 'ellipse':
        Coor = dict()
        # we create a matrix of size A.x of each pixel coordinate in A.y and inverse
        if len(dims) == 2:
            Coor['x'] = np.kron(np.ones(d2), list(range(d1)))
            Coor['y'] = np.kron(list(range(d2)), np.ones(d1))
        elif len(dims) == 3:
            Coor['x'] = np.kron(np.ones(d3 * d2), list(range(d1)))
            Coor['y'] = np.kron(
                np.kron(np.ones(d3), list(range(d2))), np.ones(d1))
            Coor['z'] = np.kron(list(range(d3)), np.ones(d2 * d1))
        if not dist == np.inf:  # determine search area for each neuron
            cm = np.zeros((nr, len(dims)))  # vector for center of mass
            Vr = []  # cell(nr,1);
            dist_indicator = []
            pars = []
            # for each dim
            for i, c in enumerate(['x', 'y', 'z'][:len(dims)]):
                # mass center in this dim = (coor*A)/sum(A)
                cm[:, i] = old_div(
                    np.dot(Coor[c], A[:, :nr].todense()), A[:, :nr].sum(axis=0))

            # parallelizing the construct_ellipse_parallel step
            for i in range(nr):
                pars.append([Coor, cm[i], A[:, i], Vr, dims,
                             dist, max_size, min_size, d])
            if dview is None:
                res = list(map(construct_ellipse_parallel, pars))
            else:
                if 'multiprocessing' in str(type(dview)):
                    res = dview.map_async(
                        construct_ellipse_parallel, pars).get(4294967)
                else:
                    res = dview.map_sync(construct_ellipse_parallel, pars)
            for r in res:
                dist_indicator.append(r)

            dist_indicator = (np.asarray(dist_indicator)).squeeze().T

        else:
            raise Exception('Not implemented')
            dist_indicator = True * np.ones((d, nr))

    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:, i].toarray(), dims[::-1])
            if len(expandCore) > 0:
                if len(expandCore.shape) < len(dims):  # default for 3D
                    expandCore = iterate_structure(
                        generate_binary_structure(len(dims), 1), 2).astype(int)
                A_temp = grey_dilation(A_temp, footprint=expandCore)
            else:
                A_temp = grey_dilation(A_temp, [1] * len(dims))

            dist_indicator[:, i] = scipy.sparse.coo_matrix(np.squeeze(np.reshape(A_temp, (d, 1)))[:,None] > 0)
    else:
        raise Exception('Not implemented')
        dist_indicator = True * np.ones((d, nr))

    return dist_indicator
Beispiel #56
0
def find_activated_parcels(ppm_map, parcels_mask, ppm_threshold,
                           cluster_size_threshold):
    """Find activated parcels for a given PPM map.

    Parameters
    ----------
    ppm_map : ~numpy.ndarray
        Posterior Probability Map data
    parcels_mask : ~numpy.ndarray
        parcellation mask data
    ppm_threshold : float
        threshold for the PPM
    cluster_size_threshold : int
        threshold for the size of the cluster of activated neurons

    Returns
    -------
    list of int
        Activated parcels
        
    Examples
    --------
    >>> from nilearn.image import load_img
    >>> parcels_img = load_img(PARCELLATION_MASK)
    >>> parcels_img_data = parcels_img.get_data()
    >>> ppm_img = load_img(os.path.join('/home/jariasal/scipy_notebook/output/114/pyhrf_output', 'jde_vem_ppm_a_nrl_Finger.nii'))
    >>> print(find_activated_parcels(ppm_img.get_data(), parcels_img_data, 0.99999, 50))
    [ 21 101 290 417 485]

    """
    ppm_map[np.isnan(ppm_map)] = 0.
    ppm_map_mask = (ppm_map >= ppm_threshold)  # binary ppm_map
    binary_structure = generate_binary_structure(
        3, 2)  # structuring element that defines neighborhood connections

    ppm_map_labeled, nb_labels = label(ppm_map_mask,
                                       structure=binary_structure)

    if not nb_labels:  # always select at least one cluster
        ppm_threshold = ppm_map.max() * 0.9999
        ppm_map_mask = (ppm_map >= ppm_threshold)
        ppm_map_labeled, nb_labels = label(ppm_map_mask,
                                           structure=binary_structure)
        print("ppm_threshold set to {}".format(ppm_threshold))

    # number of active voxels for each cluster
    clusters_size = ndi_sum(ppm_map_mask,
                            ppm_map_labeled,
                            index=np.arange(nb_labels + 1))

    # clusters (labels) whose number of active voxels is greater than the threshold
    active_clusters = clusters_size >= cluster_size_threshold

    if not active_clusters.any():  # threshold for cluster size is too high
        cluster_size_threshold = np.amax(clusters_size)
        active_clusters = clusters_size >= cluster_size_threshold
        print(
            "cluster_size_threshold set to {}".format(cluster_size_threshold))

    # voxels belonging the clusters whose number of active voxels is greater than the threshold
    active_clusters_mask = active_clusters[ppm_map_labeled]
    active_parcels = np.unique(parcels_mask[active_clusters_mask])

    return active_parcels
Beispiel #57
0
def generate_morph_drain_curv(seg_image_input,R_critical):
    # The method for this function follows Hilpert & Miller, AWR (2001)
    # 1. Perform erosion for the pore space with radius of R_critical
    # 2. Label the eroded pore space, and leave only the pore space that is still 
    #    connected with the non-wetting phase reservoir
    # 3. Perform the dilation for the labelled pore space with radius of R_critical
    # ****************************************************************************
    # Currently I am provided with a 3D SignDist image which has positive values
    # in the pore space and 0.0 in the solid phase.
    # ****************************************************************************
    if seg_image_input.ndim == 2:

        pore_vol = 1.0*(seg_image_input>0.0).sum()
        radius = R_critical
        print('Morphological Drainage: processing critical radius: '+str(radius)+' now......')

        # Step 1.1: Create structuring element
        domain_size = int(np.rint(radius*2)+2)
        grid = np.indices((domain_size,domain_size))
        mk_circle = (grid[0]-domain_size/2)**2 + (grid[1]-domain_size/2)**2 <= radius**2
        circle = np.zeros((domain_size,domain_size),dtype=np.uint8)
        circle[mk_circle]=1
        circle = extract_shape(circle).astype(bool)

        # Step 1.2: Perform erosion on the pore space
        # NOTE: the dtype of 'seg_im_ero' is 'bool'
        seg_im_ero = morphology.binary_erosion(seg_image_input>0.0,structure=circle,border_value=1)
        # NOTE: 'border_value' for erosion should be 'True'

        # Step 2: Label the eroded pore space
        # NOTE: Assume the NW phase reservoir is at the first layer of the domain
        #       i.e. at seg_image[0,:] - adjust it if this does not suit your need
        # For erosion, assume that diagonals are not considered
        seg_im_ero_label_temp,num_features = measurements.label(seg_im_ero,structure=morphology.generate_binary_structure(2,1))
        #seg_im_ero_label_temp,num_features = measurements.label(seg_im_ero,structure=morphology.generate_binary_structure(2,2))
        # NOTE: Here I assume the inlet is at the first layer along axis=0 (i.e. domain[0,:])
        #       You can always change to any other layer as the inlet for this drainage.
        label_check = seg_im_ero_label_temp[0,seg_im_ero_label_temp[0,:]!=0]
        label_check = np.unique(label_check)

        # NOTE: the following lines are only for you to check things
        # ******************** For check *******************************#
        # It assigns the labelled array as: NW -> 1, W -> 2, Solid -> 0
        #seg_im_ero_label_show = seg_im_ero_label.copy()
        #seg_im_ero_label_show[seg_im_ero_label_show !=1] = 2
        #seg_im_ero_label_show[np.logical_not(seg_image_2d)]=0
        # ******************** End: for check **************************#
        
        seg_im_ero_label = np.zeros_like(seg_im_ero_label_temp,dtype=bool)
        for labels in label_check:
            seg_im_ero_label = np.logical_or(seg_im_ero_label,seg_im_ero_label_temp==labels)
        #seg_im_ero_label = seg_im_ero_label.astype(np.uint8)
        
        # Step 3: perform dilation on the labelled pore space 
        seg_im_ero_label_dil = morphology.binary_dilation(seg_im_ero_label,structure=circle,border_value=0)
        # NOTE: 'border_value' for dilation should be 'False'
        # NOTE: the dtype of 'seg_im_ero_label_dil' is 'bool'
        seg_im_ero_label_dil[seg_image_input<=0.0]=False
        
        Sw = 1.0 - seg_im_ero_label_dil.sum()/pore_vol
    else:

        pore_vol = 1.0*(seg_image_input>0.0).sum()
        radius = R_critical
        print('Morphological Drainage: processing critical radius: '+str(radius)+' now......')

        # Step 1.1: Create structuring element
        domain_size = int(np.rint(radius*2)+2)
        grid = np.indices((domain_size,domain_size,domain_size))
        mk_circle = (grid[0]-domain_size/2)**2 + (grid[1]-domain_size/2)**2 + (grid[2]-domain_size/2)**2 <= radius**2
        circle = np.zeros((domain_size,domain_size,domain_size),dtype=np.uint8)
        circle[mk_circle]=1
        circle = extract_shape(circle).astype(bool)

        # Step 1.2: Perform erosion on the pore space
        # NOTE: the dtype of 'seg_im_ero' is 'bool'
        seg_im_ero = morphology.binary_erosion(seg_image_input>0.0,structure=circle,border_value=1)
        # NOTE: 'border_value' for erosion should be 'True'

        # Step 2: Label the eroded pore space
        # NOTE: Assume the NW phase reservoir is at the first layer of the domain
        #       i.e. at seg_image[0,:] - adjust it if this does not suit your need
        # For erosion, assume that diagonals are not considered
        seg_im_ero_label_temp,num_features = measurements.label(seg_im_ero,structure=morphology.generate_binary_structure(3,1))
        #seg_im_ero_label_temp,num_features = measurements.label(seg_im_ero,structure=morphology.generate_binary_structure(3,3))
        # NOTE: Here I assume the inlet is at the first layer along axis=0 (i.e. domain[0,:,:])
        #       You can always change to any other layer as the inlet for this drainage.
        label_check = seg_im_ero_label_temp[0,seg_im_ero_label_temp[0,:]!=0]
        label_check = np.unique(label_check)

        # NOTE: the following lines are only for you to check things
        # ******************** For check *******************************#
        # It assigns the labelled array as: NW -> 1, W -> 2, Solid -> 0
        #seg_im_ero_label_show = seg_im_ero_label.copy()
        #seg_im_ero_label_show[seg_im_ero_label_show !=1] = 2
        #seg_im_ero_label_show[np.logical_not(seg_image_2d)]=0
        # ******************** End: for check **************************#
        
        seg_im_ero_label = np.zeros_like(seg_im_ero_label_temp,dtype=bool)
        for labels in label_check:
            seg_im_ero_label = np.logical_or(seg_im_ero_label,seg_im_ero_label_temp==labels)
        #seg_im_ero_label = seg_im_ero_label.astype(np.uint8)
        
        # Step 3: perform dilation on the labelled pore space 
        seg_im_ero_label_dil = morphology.binary_dilation(seg_im_ero_label,structure=circle,border_value=0)
        # NOTE: 'border_value' for dilation should be 'False'
        # NOTE: the dtype of 'seg_im_ero_label_dil' is 'bool'
        seg_im_ero_label_dil[seg_image_input<=0.0]=False
        
        Sw = 1.0 - seg_im_ero_label_dil.sum()/pore_vol
    #end if 
    return Sw
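extract_shape is not defined in this snippet; a minimal sketch, assuming it merely crops the structuring element to the bounding box of its nonzero entries:

import numpy as np

def extract_shape(arr):
    # Hypothetical helper assumed by generate_morph_drain_curv: crop a binary
    # array to the bounding box of its nonzero entries, so the structuring
    # element carries no all-zero border rows/columns.
    nonzero = np.nonzero(arr)
    slices = tuple(slice(ax.min(), ax.max() + 1) for ax in nonzero)
    return arr[slices]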
Beispiel #58
0
    def __init__(self, fnames=None, dims=None, dxy=(1, 1),
                 border_pix=0, del_duplicates=False, low_rank_background=True,
                 memory_fact=1, n_processes=1, nb_patch=1, p_ssub=2, p_tsub=2,
                 remove_very_bad_comps=False, rf=None, stride=None,
                 check_nan=True, n_pixels_per_process=None,
                 k=30, alpha_snmf=100, center_psf=False, gSig=[5, 5], gSiz=None,
                 init_iter=2, method_init='greedy_roi', min_corr=.85,
                 min_pnr=20, gnb=1, normalize_init=True, options_local_NMF=None,
                 ring_size_factor=1.5, rolling_length=100, rolling_sum=True,
                 ssub=2, ssub_B=2, tsub=2,
                 block_size_spat=5000, num_blocks_per_run_spat=20,
                 block_size_temp=5000, num_blocks_per_run_temp=20,
                 update_background_components=True,
                 method_deconvolution='oasis', p=2, s_min=None,
                 do_merge=True, merge_thresh=0.8,
                 decay_time=0.4, fr=30, min_SNR=2.5, rval_thr=0.8,
                 N_samples_exceptionality=None, batch_update_suff_stat=False,
                 expected_comps=500, iters_shape=5, max_comp_update_shape=np.inf,
                 max_num_added=5, min_num_trial=5, minibatch_shape=100, minibatch_suff_stat=5,
                 n_refit=0, num_times_comp_updated=np.inf, simultaneously=False,
                 sniper_mode=False, test_both=False, thresh_CNN_noisy=0.5,
                 thresh_fitness_delta=-50, thresh_fitness_raw=None, thresh_overlap=0.5,
                 update_freq=200, update_num_comps=True, use_dense=True, use_peak_max=True,
                 only_init_patch=True, var_name_hdf5='mov', max_merge_area=None, params_dict={},
                 ):
        """Class for setting the processing parameters. All parameters for CNMF, online-CNMF, quality testing,
        and motion correction can be set here and then used in the various processing pipeline steps.
        The preferred way to set parameters is by using the set function, where a subclass is determined and a
        dictionary is passed. The whole dictionary can also be initialized at once by passing a dictionary params_dict
        when initializing the CNMFParams object. Direct setting of the positional arguments in CNMFParams is only
        present for backwards compatibility reasons and should not be used if possible.

        Args:
            Any parameter that is not set gets a default value specified
            by the dictionary of default options
        DATA PARAMETERS (CNMFParams.data) #####

            fnames: list[str]
                list of complete paths to files that need to be processed

            dims: (int, int), default: computed from fnames
                dimensions of the FOV in pixels

            fr: float, default: 30
                imaging rate in frames per second

            decay_time: float, default: 0.4
                length of typical transient in seconds

            dxy: (float, float)
                spatial resolution of FOV in pixels per um

            var_name_hdf5: str, default: 'mov'
                if loading from hdf5 name of the variable to load

            caiman_version: str
                version of CaImAn being used

            last_commit: str
                hash of last commit in the caiman repo

            mmap_F: list[str]
                paths to F-order memory mapped files after motion correction

            mmap_C: str
                path to C-order memory mapped file after motion correction

        PATCH PARAMS (CNMFParams.patch)######

            rf: int or None, default: None
                Half-size of patch in pixels. If None, no patches are constructed and the whole FOV is processed jointly

            stride: int or None, default: None
                Overlap between neighboring patches in pixels.

            nb_patch: int, default: 1
                Number of (local) background components per patch

            border_pix: int, default: 0
                Number of pixels to exclude around each border.

            low_rank_background: bool, default: True
                Whether to update the background using a low rank approximation.
                If False all the nonzero elements of the background components are updated using hals
                (to be used with one background per patch)

            del_duplicates: bool, default: False
                Delete duplicate components in the overlapping regions between neighboring patches. If False,
                then merging is used.

            only_init: bool, default: True
                whether to run only the initialization

            p_patch: int, default: 0
                order of AR dynamics when processing within a patch

            skip_refinement: bool, default: False
                Whether to skip refinement of components (deprecated?)

            remove_very_bad_comps: bool, default: True
                Whether to remove (very) bad quality components during patch processing

            p_ssub: float, default: 2
                Spatial downsampling factor

            p_tsub: float, default: 2
                Temporal downsampling factor

            memory_fact: float, default: 1
                unitless number for increasing the amount of available memory

            n_processes: int
                Number of processes used for processing patches in parallel

            in_memory: bool, default: True
                Whether to load patches in memory

        PRE-PROCESS PARAMS (CNMFParams.preprocess) #############

            sn: np.array or None, default: None
                noise level for each pixel

            noise_range: [float, float], default: [.25, .5]
                range of normalized frequencies over which to compute the PSD for noise determination

            noise_method: 'mean'|'median'|'logmexp', default: 'mean'
                PSD averaging method for computing the noise std

            max_num_samples_fft: int, default: 3*1024
                Chunk size for computing the PSD of the data (for memory considerations)

            n_pixels_per_process: int, default: 1000
                Number of pixels to be allocated to each process

            compute_g: bool, default: False
                whether to estimate global time constant

            p: int, default: 2
                 order of AR indicator dynamics

            lags: int, default: 5
                number of lags to be considered for time constant estimation

            include_noise: bool, default: False
                    flag for using noise values when estimating g

            pixels: list, default: None
                 pixels to be excluded due to saturation

            check_nan: bool, default: True
                whether to check for NaNs

        INIT PARAMS (CNMFParams.init)###############

            K: int, default: 30
                number of components to be found (per patch or whole FOV depending on whether rf=None)

            SC_kernel: {'heat', 'cos', 'binary'}, default: 'heat'
                kernel for graph affinity matrix

            SC_sigma: float, default: 1
                variance for SC kernel

            SC_thr: float, default: 0,
                threshold for affinity matrix

            SC_normalize: bool, default: True
                standardize entries prior to computing the affinity matrix

            SC_use_NN: bool, default: False
                sparsify affinity matrix by using only nearest neighbors

            SC_nnn: int, default: 20
                number of nearest neighbors to use

            gSig: [int, int], default: [5, 5]
                radius of average neurons (in pixels)

            gSiz: [int, int], default: [int(round((x * 2) + 1)) for x in gSig],
                half-size of bounding box for each neuron

            center_psf: bool, default: False
                whether to use 1p data processing mode. Set to true for 1p

            ssub: float, default: 2
                spatial downsampling factor

            tsub: float, default: 2
                temporal downsampling factor

            nb: int, default: 1
                number of background components

            lambda_gnmf: float, default: 1.
                regularization weight for graph NMF

            maxIter: int, default: 5
                number of HALS iterations during initialization

            method_init: 'greedy_roi'|'greedy_pnr'|'sparse_NMF'|'local_NMF' default: 'greedy_roi'
                initialization method. use 'greedy_pnr' for 1p processing and 'sparse_NMF' for dendritic processing.

            min_corr: float, default: 0.85
                minimum value of correlation image for determining a candidate component during greedy_pnr

            min_pnr: float, default: 20
                minimum value of psnr image for determining a candidate component during greedy_pnr

            ring_size_factor: float, default: 1.5
                radius of ring (*gSig) for computing background during greedy_pnr

            ssub_B: float, default: 2
                downsampling factor for background during greedy_pnr

            init_iter: int, default: 2
                number of iterations during greedy_pnr (1p) initialization

            nIter: int, default: 5
                number of rank-1 refinement iterations during greedy_roi initialization

            rolling_sum: bool, default: True
                use rolling sum (as opposed to full sum) for determining candidate centroids during greedy_roi

            rolling_length: int, default: 100
                width of rolling window for rolling sum option

            kernel: np.array or None, default: None
                user specified template for greedyROI

            max_iter_snmf : int, default: 500
                maximum number of iterations for sparse NMF initialization

            alpha_snmf: float, default: 100
                sparse NMF sparsity regularization weight

            sigma_smooth_snmf : (float, float, float), default: (.5,.5,.5)
                std of Gaussian kernel for smoothing data in sparse_NMF

            perc_baseline_snmf: float, default: 20
                percentile to be removed from the data in sparse_NMF prior to decomposition

            normalize_init: bool, default: True
                whether to equalize the movies during initialization

            options_local_NMF: dict
                dictionary with parameters to pass to local_NMF initializer

        SPATIAL PARAMS (CNMFParams.spatial) ##########

            method_exp: 'dilate'|'ellipse', default: 'dilate'
                method for expanding footprint of spatial components

            dist: float, default: 3
                expansion factor of ellipse

            expandCore: morphological element, default: None(?)
                morphological element for expanding footprints under dilate

            nb: int, default: 1
                number of global background components

            n_pixels_per_process: int, default: 1000
                number of pixels to be processed by each worker

            thr_method: 'nrg'|'max', default: 'nrg'
                thresholding method

            maxthr: float, default: 0.1
                Max threshold

            nrgthr: float, default: 0.9999
                Energy threshold

            extract_cc: bool, default: True
                whether to extract connected components during thresholding
                (might want to turn to False for dendritic imaging)

            medw: (int, int) default: None
                window of median filter (set to (3,)*len(dims) in cnmf.fit)

            se: np.array or None, default: None
                 Morphological closing structuring element (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)

            ss: np.array or None, default: None
                Binary element for determining connectivity (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)

            update_background_components: bool, default: True
                whether to update the spatial background components

            method_ls: 'lasso_lars'|'nnls_L0', default: 'lasso_lars'
                'nnls_L0'. Nonnegative least square with L0 penalty
                'lasso_lars' lasso lars function from scikit learn

            block_size : int, default: 5000
                Number of pixels to process at the same time for dot product. Reduce if you face memory problems

            num_blocks_per_run: int, default: 20
                Parallelization of A'*Y operation

            normalize_yyt_one: bool, default: True
                Whether to normalize the C and A matrices so that diag(C*C.T) = 1 during update spatial

        TEMPORAL PARAMS (CNMFParams.temporal)###########

            ITER: int, default: 2
                block coordinate descent iterations

            method_deconvolution: 'oasis'|'cvxpy', default: 'oasis'
                method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy')
                if method cvxpy, primary and secondary (if problem unfeasible for approx solution)

            solvers: 'ECOS'|'SCS', default: ['ECOS', 'SCS']
                 solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT'

            p: 0|1|2, default: 2
                order of AR indicator dynamics

            memory_efficient: False

            bas_nonneg: bool, default: True
                whether to set a non-negative baseline (otherwise b >= min(y))

            noise_range: [float, float], default: [.25, .5]
                range of normalized frequencies over which to compute the PSD for noise determination

            noise_method: 'mean'|'median'|'logmexp', default: 'mean'
                PSD averaging method for computing the noise std

            lags: int, default: 5
                number of autocovariance lags to be considered for time constant estimation

            optimize_g: bool, default: False
                flag for optimizing time constants

            fudge_factor: float (close but smaller than 1) default: .96
                bias correction factor for discrete time constants

            nb: int, default: 1
                number of global background components

            verbosity: bool, default: False
                whether to be verbose

            block_size : int, default: 5000
                Number of pixels to process at the same time for dot product. Reduce if you face memory problems

            num_blocks_per_run: int, default: 20
                Parallelization of A'*Y operation

            s_min: float or None, default: None
                Minimum spike threshold amplitude (computed in the code if used).

        MERGE PARAMS (CNMFParams.merge)#####
            do_merge: bool, default: True
                Whether or not to merge

            thr: float, default: 0.8
                Trace correlation threshold for merging two components.

            merge_parallel: bool, default: False
                Perform merging in parallel

            max_merge_area: int or None, default: None
                maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process

        QUALITY EVALUATION PARAMETERS (CNMFParams.quality)###########

            min_SNR: float, default: 2.5
                trace SNR threshold. Traces with SNR above this will get accepted

            SNR_lowest: float, default: 0.5
                minimum required trace SNR. Traces with SNR below this will get rejected

            rval_thr: float, default: 0.8
                space correlation threshold. Components with correlation higher than this will get accepted

            rval_lowest: float, default: -1
                minimum required space correlation. Components with correlation below this will get rejected

            use_cnn: bool, default: True
                flag for using the CNN classifier.

            min_cnn_thr: float, default: 0.9
                CNN classifier threshold. Components with score higher than this will get accepted

            cnn_lowest: float, default: 0.1
                minimum required CNN threshold. Components with score lower than this will get rejected.

            gSig_range: list of integers, default: None
                gSig scale values for CNN classifier. If not None, multiple values are tested in the CNN classifier.

        ONLINE CNMF (ONACID) PARAMETERS (CNMFParams.online)#####

            N_samples_exceptionality: int, default: np.ceil(decay_time*fr),
                Number of frames over which trace SNR is computed (usually length of a typical transient)

            batch_update_suff_stat: bool, default: False
                Whether to update sufficient statistics in batch mode

            ds_factor: int, default: 1,
                spatial downsampling factor for faster processing (if > 1)

            dist_shape_update: bool, default: False,
                update shapes in a distributed fashion

            epochs: int, default: 1,
                number of times to go over data

            expected_comps: int, default: 500
                number of expected components (for memory allocation purposes)

            init_batch: int, default: 200,
                length of mini batch used for initialization

            init_method: 'bare'|'cnmf'|'seeded', default: 'bare',
                initialization method

            iters_shape: int, default: 5
                Number of block-coordinate decent iterations for each shape update

            max_comp_update_shape: int, default: np.inf
                Maximum number of spatial components to be updated at each time

            max_num_added: int, default: 5
                Maximum number of new components to be added in each frame

            max_shifts_online: int, default: 10,
                Maximum shifts for motion correction during online processing

            min_SNR: float, default: 2.5
                Trace SNR threshold for accepting a new component

            min_num_trial: int, default: 5
                Number of new possible components for each frame

            minibatch_shape: int, default: 100
                Number of frames stored in rolling buffer

            minibatch_suff_stat: int, default: 5
                mini batch size for updating sufficient statistics

            motion_correct: bool, default: True
                Whether to perform motion correction during online processing

            movie_name_online: str, default: 'online_movie.avi'
                Name of saved movie (appended in the data directory)

            normalize: bool, default: False
                Whether to normalize each frame prior to online processing

            n_refit: int, default: 0
                Number of additional iterations for computing traces

            num_times_comp_updated: int, default: np.inf

            path_to_model: str, default: os.path.join(caiman_datadir(), 'model', 'cnn_model_online.h5')
                Path to online CNN classifier

            rval_thr: float, default: 0.8
                space correlation threshold for accepting a new component

            save_online_movie: bool, default: False
                Whether to save the results movie

            show_movie: bool, default: False
                Whether to display movie of online processing

            simultaneously: bool, default: False
                Whether to demix and deconvolve simultaneously

            sniper_mode: bool, default: False
                Whether to use the online CNN classifier for screening candidate components (otherwise space
                correlation is used)

            test_both: bool, default: False
                Whether to use both the CNN and space correlation for screening new components

            thresh_CNN_noisy: float, default: 0.5
                Threshold for the online CNN classifier

            thresh_fitness_delta: float (negative)
                Derivative test for detecting traces

            thresh_fitness_raw: float (negative), default: computed from min_SNR
                Threshold value for testing trace SNR

            thresh_overlap: float, default: 0.5
                Intersection-over-Union space overlap threshold for screening new components

            update_freq: int, default: 200
                Update each shape at least once every X frames when in distributed mode

            update_num_comps: bool, default: True
                Whether to search for new components

            use_dense: bool, default: True
                Whether to store and represent A and b as a dense matrix

            use_peak_max: bool, default: True
                Whether to find candidate centroids using skimage's find local peaks function

        MOTION CORRECTION PARAMETERS (CNMFParams.motion)####

            border_nan: bool or str, default: 'copy'
                flag for allowing NaN in the boundaries. True allows NaN, whereas 'copy' copies the value of the
                nearest data point.

            gSig_filt: int or None, default: None
                size of kernel for high pass spatial filtering in 1p data. If None no spatial filtering is performed

            is3D: bool, default: False
                flag for 3D recordings for motion correction

            max_deviation_rigid: int, default: 3
                maximum deviation in pixels between rigid shifts and shifts of individual patches

            max_shifts: (int, int), default: (6,6)
                maximum shifts per dimension in pixels.

            min_mov: float or None, default: None
                minimum value of movie. If None it get computed.

            niter_rig: int, default: 1
                number of iterations rigid motion correction.

            nonneg_movie: bool, default: True
                flag for producing a non-negative movie.

            num_frames_split: int, default: 80
                split movie every x frames for parallel processing

            num_splits_to_process_els, default: [7, None]
            num_splits_to_process_rig, default: None

            overlaps: (int, int), default: (24, 24)
                overlap between patches in pixels in pw-rigid motion correction.

            pw_rigid: bool, default: False
                flag for performing pw-rigid motion correction.

            shifts_opencv: bool, default: True
                flag for applying shifts using cubic interpolation (otherwise FFT)

            splits_els: int, default: 14
                number of splits across time for pw-rigid registration

            splits_rig: int, default: 14
                number of splits across time for rigid registration

            strides: (int, int), default: (96, 96)
                how often to start a new patch in pw-rigid registration. Size of each patch will be strides + overlaps

            upsample_factor_grid: int, default: 4
                motion field upsampling factor during FFT shifts.

            use_cuda: bool, default: False
                flag for using a GPU.
        """

        self.data = {
            'fnames': fnames,
            'dims': dims,
            'fr': fr,
            'decay_time': decay_time,
            'dxy': dxy,
            'var_name_hdf5': var_name_hdf5,
            'caiman_version': '1.6.2',
            'last_commit': None,
            'mmap_F': None,
            'mmap_C': None
        }

        self.patch = {
            'border_pix': border_pix,
            'del_duplicates': del_duplicates,
            'in_memory': True,
            'low_rank_background': low_rank_background,
            'memory_fact': memory_fact,
            'n_processes': n_processes,
            'nb_patch': nb_patch,
            'only_init': only_init_patch,
            'p_patch': 0,                 # AR order within patch
            'remove_very_bad_comps': remove_very_bad_comps,
            'rf': rf,
            'skip_refinement': False,
            'p_ssub': p_ssub,             # spatial downsampling factor
            'stride': stride,
            'p_tsub': p_tsub,             # temporal downsampling factor
        }

        self.preprocess = {
            'check_nan': check_nan,
            'compute_g': False,          # flag for estimating global time constant
            'include_noise': False,      # flag for using noise values when estimating g
            # number of autocovariance lags to be considered for time constant estimation
            'lags': 5,
            'max_num_samples_fft': 3 * 1024,
            'n_pixels_per_process': n_pixels_per_process,
            'noise_method': 'mean',      # averaging method ('mean','median','logmexp')
            'noise_range': [0.25, 0.5],  # range of normalized frequencies over which to average
            'p': p,                      # order of AR indicator dynamics
            'pixels': None,              # pixels to be excluded due to saturation
            'sn': None,                  # noise level for each pixel
        }

        self.init = {
            'K': k,                   # number of components,
            'SC_kernel': 'heat',         # kernel for graph affinity matrix
            'SC_sigma' : 1,              # std for SC kernel
            'SC_thr': 0,                 # threshold for affinity matrix
            'SC_normalize': True,        # standardize entries prior to
                                         # computing affinity matrix
            'SC_use_NN': False,          # sparsify affinity matrix by using
                                         # only nearest neighbors
            'SC_nnn': 20,                # number of nearest neighbors to use
            'alpha_snmf': alpha_snmf,
            'center_psf': center_psf,
            'gSig': gSig,
            # size of bounding box
            'gSiz': gSiz,
            'init_iter': init_iter,
            'kernel': None,           # user specified template for greedyROI
            'lambda_gnmf' :1,         # regularization weight for graph NMF
            'maxIter': 5,             # number of HALS iterations
            'max_iter_snmf': 500,
            'method_init': method_init,    # can be greedy_roi, greedy_pnr sparse_nmf, local_NMF
            'min_corr': min_corr,
            'min_pnr': min_pnr,
            'nIter': 5,               # number of refinement iterations
            'nb': gnb,                # number of global background components
            # whether to pixelwise equalize the movies during initialization
            'normalize_init': normalize_init,
            # dictionary with parameters to pass to the local_NMF initializer
            'options_local_NMF': options_local_NMF,
            'perc_baseline_snmf': 20,
            'ring_size_factor': ring_size_factor,
            'rolling_length': rolling_length,
            'rolling_sum': rolling_sum,
            'sigma_smooth_snmf': (.5, .5, .5),
            'ssub': ssub,             # spatial downsampling factor
            'ssub_B': ssub_B,
            'tsub': tsub,             # temporal downsampling factor
        }

        self.spatial = {
            'block_size_spat': block_size_spat, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
            'dist': 3,                       # expansion factor of ellipse
            'expandCore': iterate_structure(generate_binary_structure(2, 1), 2).astype(int),
            # Flag to extract connected components (might want to turn to False for dendritic imaging)
            'extract_cc': True,
            'maxthr': 0.1,                   # Max threshold
            'medw': None,                    # window of median filter
            # method for determining footprint of spatial components ('ellipse' or 'dilate')
            'method_exp': 'dilate',
            # 'nnls_L0'. Nonnegative least square with L0 penalty
            # 'lasso_lars' lasso lars function from scikit learn
            'method_ls': 'lasso_lars',
            # number of pixels to be processed by each worker
            'n_pixels_per_process': n_pixels_per_process,
            'nb': gnb,                        # number of background components
            'normalize_yyt_one': True,
            'nrgthr': 0.9999,                # Energy threshold
            'num_blocks_per_run_spat': num_blocks_per_run_spat, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
            'se': np.ones((3, 3), dtype='uint8'),  # Morphological closing structuring element
            'ss': np.ones((3, 3), dtype='uint8'),  # Binary element for determining connectivity
            'thr_method': 'nrg',             # Method of thresholding ('max' or 'nrg')
            # whether to update the background components in the spatial phase
            'update_background_components': update_background_components,
        }

        self.temporal = {
            'ITER': 2,                  # block coordinate descent iterations
            # flag for setting non-negative baseline (otherwise b >= min(y))
            'bas_nonneg': False,
            # number of pixels to process at the same time for dot product. Make it
            # smaller if memory problems
            'block_size_temp': block_size_temp, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
            # bias correction factor (between 0 and 1, close to 1)
            'fudge_factor': .96,
            # number of autocovariance lags to be considered for time constant estimation
            'lags': 5,
            'optimize_g': False,         # flag for optimizing time constants
            'memory_efficient': False,
            # method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy')
            # if method is cvxpy, primary and secondary (if the problem is infeasible for the approx
            # solution) solvers to be used with cvxpy can be 'ECOS','SCS' or 'CVXOPT'
            'method_deconvolution': method_deconvolution,  # 'cvxpy', # 'oasis'
            'nb': gnb,                   # number of background components
            'noise_method': 'mean',     # averaging method ('mean','median','logmexp')
            'noise_range': [.25, .5],   # range of normalized frequencies over which to average
            'num_blocks_per_run_temp': num_blocks_per_run_temp, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
            'p': p,                     # order of AR indicator dynamics
            's_min': s_min,             # minimum spike threshold
            'solvers': ['ECOS', 'SCS'],
            'verbosity': False,
        }

        self.merging = {
            'do_merge': do_merge,
            'merge_thr': merge_thresh,
            'merge_parallel': False,
            'max_merge_area': max_merge_area
        }

        self.quality = {
            'SNR_lowest': 0.5,         # minimum accepted SNR value
            'cnn_lowest': 0.1,         # minimum accepted value for CNN classifier
            'gSig_range': None,        # range for gSig scale for CNN classifier
            'min_SNR': min_SNR,        # transient SNR threshold
            'min_cnn_thr': 0.9,        # threshold for CNN classifier
            'rval_lowest': -1,         # minimum accepted space correlation
            'rval_thr': rval_thr,      # space correlation threshold
            'use_cnn': True,           # use CNN based classifier
        }

        self.online = {
            'N_samples_exceptionality': N_samples_exceptionality,  # timesteps to compute SNR
            'batch_update_suff_stat': batch_update_suff_stat,
            'dist_shape_update': False,        # update shapes in a distributed way
            'ds_factor': 1,                    # spatial downsampling for faster processing
            'epochs': 1,                       # number of epochs
            'expected_comps': expected_comps,  # number of expected components
            'init_batch': 200,                 # length of mini batch for initialization
            'init_method': 'bare',             # initialization method for first batch
            'iters_shape': iters_shape,        # number of block-CD iterations
            'max_comp_update_shape': max_comp_update_shape,
            'max_num_added': max_num_added,    # maximum number of new components for each frame
            'max_shifts_online': 10,           # maximum shifts during motion correction
            'min_SNR': min_SNR,                # minimum SNR for accepting a new trace
            'min_num_trial': min_num_trial,    # number of new possible components for each frame
            'minibatch_shape': minibatch_shape,  # number of frames in each minibatch
            'minibatch_suff_stat': minibatch_suff_stat,
            'motion_correct': True,            # flag for motion correction
            'movie_name_online': 'online_movie.avi',  # filename of saved movie (appended to directory where data is located)
            'normalize': False,                # normalize frame
            'n_refit': n_refit,                # Additional iterations to simultaneously refit
            'num_times_comp_updated': num_times_comp_updated,
            # path to CNN model for testing new comps
            'path_to_model': os.path.join(caiman_datadir(), 'model',
                                          'cnn_model_online.h5'),
            'rval_thr': rval_thr,              # space correlation threshold
            'save_online_movie': False,        # flag for saving online movie
            'show_movie': False,               # display movie online
            'simultaneously': simultaneously,  # demix and deconvolve simultaneously
            'sniper_mode': sniper_mode,        # flag for using CNN
            'test_both': test_both,            # flag for using both CNN and space correlation
            'thresh_CNN_noisy': thresh_CNN_noisy,  # threshold for online CNN classifier
            'thresh_fitness_delta': thresh_fitness_delta,
            'thresh_fitness_raw': thresh_fitness_raw,    # threshold for trace SNR (computed below)
            'thresh_overlap': thresh_overlap,
            'update_freq': update_freq,            # update every shape at least once every update_freq steps
            'update_num_comps': update_num_comps,  # flag for searching for new components
            'use_dense': use_dense,            # flag for representation and storing of A and b
            'use_peak_max': use_peak_max,      # flag for finding candidate centroids
        }

        self.motion = {
            'border_nan': 'copy',               # flag for allowing NaN in the boundaries
            'gSig_filt': None,                  # size of kernel for high pass spatial filtering in 1p data
            'is3D': False,                      # flag for 3D recordings for motion correction
            'max_deviation_rigid': 3,           # maximum deviation between rigid and non-rigid
            'max_shifts': (6, 6),               # maximum shifts per dimension (in pixels)
            'min_mov': None,                    # minimum value of movie
            'niter_rig': 1,                     # number of iterations rigid motion correction
            'nonneg_movie': True,               # flag for producing a non-negative movie
            'num_frames_split': 80,             # split across time every x frames
            'num_splits_to_process_els': [7, None],
            'num_splits_to_process_rig': None,
            'overlaps': (32, 32),               # overlap between patches in pw-rigid motion correction
            'pw_rigid': False,                  # flag for performing pw-rigid motion correction
            'shifts_opencv': True,              # flag for applying shifts using cubic interpolation (otherwise FFT)
            'splits_els': 14,                   # number of splits across time for pw-rigid registration
            'splits_rig': 14,                   # number of splits across time for rigid registration
            'strides': (96, 96),                # how often to start a new patch in pw-rigid registration
            'upsample_factor_grid': 4,          # motion field upsampling factor during FFT shifts
            'use_cuda': False                   # flag for using a GPU
        }

        self.change_params(params_dict)
        self.data['last_commit'] = '-'.join(caiman.utils.utils.get_caiman_version())
        if self.data['dims'] is None and self.data['fnames'] is not None:
            self.data['dims'] = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0]
        if self.data['fnames'] is not None:
            if isinstance(self.data['fnames'], str):
                self.data['fnames'] = [self.data['fnames']]
            if self.motion['is3D']:
                T = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0][0]
            else:
                T = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[1]
            if len(self.data['fnames']) > 1:
                T = T[0]
            num_splits = T//max(self.motion['num_frames_split'],10)
            self.motion['splits_els'] = num_splits
            self.motion['splits_rig'] = num_splits
            self.online['movie_name_online'] = os.path.join(os.path.dirname(self.data['fnames'][0]), self.online['movie_name_online'])
        if self.online['N_samples_exceptionality'] is None:
            self.online['N_samples_exceptionality'] = np.ceil(self.data['fr'] * self.data['decay_time']).astype('int')
        if self.online['thresh_fitness_raw'] is None:
            self.online['thresh_fitness_raw'] = scipy.special.log_ndtr(
                -self.online['min_SNR']) * self.online['N_samples_exceptionality']
        self.online['max_shifts_online'] = (np.array(self.online['max_shifts_online']) / self.online['ds_factor']).astype(int)
        if self.init['gSig'] is None:
            self.init['gSig'] = [-1, -1]
        if self.init['gSiz'] is None:
            self.init['gSiz'] = [2*gs + 1 for gs in self.init['gSig']]
        self.init['gSiz'] = [gz if gz % 2 else gz + 1 for gz in self.init['gSiz']]

        if gnb <= 0:
            logging.warning("gnb={0}, hence setting keys nb_patch and low_rank_background ".format(gnb) +
                            "in group patch automatically.")
            self.set('patch', {'nb_patch': gnb, 'low_rank_background': None})
        if gnb == -1:
            logging.warning("gnb=-1, hence setting key update_background_components " +
                            "in group spatial automatically to False.")
            self.set('spatial', {'update_background_components': False})
        if method_init == 'corr_pnr' and ring_size_factor is not None:
            logging.warning("using CNMF-E's ringmodel for background hence setting key " +
                            "normalize_init in group init automatically to False.")
            self.set('init', {'normalize_init': False})
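
Two of the derived defaults above are worth unpacking: N_samples_exceptionality is the length (in frames) of a typical calcium transient, and thresh_fitness_raw compounds the Gaussian tail log-probability of min_SNR over that window. A minimal sketch of the computation, assuming example values for the frame rate, decay time and SNR threshold:

import numpy as np
import scipy.special

fr, decay_time, min_SNR = 30.0, 0.4, 2.5      # assumed example values
# number of timesteps a transient spans (same formula as in the constructor)
N_samples = np.ceil(fr * decay_time).astype('int')
# log tail probability of a Gaussian at -min_SNR, accumulated over the window
thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR) * N_samples
print(N_samples, thresh_fitness_raw)          # 12, approx -61.0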
Example #59
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='nnls_L0'):
    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        spatial profiles of the components (pixels x neurons). If A_in is boolean it directly defines
        the spatial support of A; otherwise the search locations are derived from it through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int
        minimum size of the ellipse axes used to expand the search area (pixels)

    max_size: [optional] int
        maximum size of the ellipse axes (pixels)

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] float
        noise associated with each pixel if known

    backend: [optional] str
        'ipyparallel', 'single_thread' or 'SLURM'
        'single_thread': no parallelization; useful for small datasets.
        'ipyparallel': uses ipython clusters and sends jobs to each of them.
        'SLURM': uses the SLURM scheduler.

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0'. Nonnegative least square with L0 penalty        
             'lasso_lars' lasso lars function from scikit learn
             'lasso_lars_old' lasso lars from old implementation, will be deprecated 

    normalize_yyt_one: bool
        whether to normalize the C and A matrices so that diag(C*C.T) consists of ones

    Returns
    -------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)
    f: np.ndarray
        same as f_in except if empty component deleted.

    """
    C = np.array(C)
    if normalize_yyt_one:
        #        cct=np.diag(C.dot(C.T))
        nr_C = np.shape(C)[0]
        d = scipy.sparse.lil_matrix((nr_C, nr_C))
        d.setdiag(np.sqrt(np.sum(C**2, 1)))
        A_in = A_in * d
        C = old_div(C, np.sqrt(np.sum(C**2, 1)[:, np.newaxis]))

    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, str):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A or C need to be determined')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[0]), dtype=bool)  # C is neurons x time, so shape[0] is the number of components

    if n_pixels_per_process > d:
        print(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decreasing suitably.')
        n_pixels_per_process = d

    if f is not None:
        nb = f.shape[0]  # infer the number of background components from f
    # otherwise keep the value of nb passed as an argument; f is estimated below
    # when A_in is boolean and C is None

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = model.components_.squeeze()
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)
            f = np.atleast_2d(f)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimate of C and f')

    print(np.shape(A_in))

    Cf = np.vstack((C, f))  # create matrix that includes background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print('cluster temporary folder: ' + tmpf)
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()

    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if isinstance(Y, np.memmap):  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, str) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')
            # unreachable fallback kept from the original for reference:
            # Y_name = os.path.join(folder, 'Y_temp.npy')
            # np.save(Y_name, Y)
            # Y, _, _, _ = load_memmap(Y_name)

    # create arguments to be passed to the function. Here we are grouping
    # bunch of pixels to be processed by each thread
#    pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
# for i in range(0, np.prod(dims) - n_pixels_per_process + 1,
# n_pixels_per_process)]
    cct = np.diag(C.dot(C.T))
    rank_f = nb
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, i + n_pixels_per_process)), method_ls, cct, rank_f])

    # handle the remainder pixels not covered by the equally sized groups above
    if i + n_pixels_per_process < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct, rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
##
#        Cf_ = [Cf[idx_, :] for idx_ in ind2_]
#
#        #% LARS regression
#        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))
#
#        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
#            if px % 1000 == 0:
#                print px
#            if np.size(c) > 0:
#                _, _, a, _, _ = lars_regression_noise_old(y, np.array(c.T), 1, sn[px]**2 * T)
#                if np.isscalar(a):
#                    A_[px, id2_] = a
#                else:
#                    A_[px, id2_] = a.T
##

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method,
                              maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating {} empty components!!'.format(len(ff)))
        neuron_ff = [i for i in ff if i < nr]            # empty neural components
        background_ff = [i - nr for i in ff if i >= nr]  # empty background components
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, neuron_ff, 0)
        f = np.delete(f, background_ff, 0)
        nr = nr - len(neuron_ff)
        nb = nb - len(background_ff)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    #import pdb
    # pdb.set_trace()
#    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, block_size=1000, dview=dview) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove the temporary files created above
        print("Removing temporary files")
        shutil.rmtree(folder)
    except:
        raise Exception("Failed to delete: " + folder)


    # if A_in.dtype == bool:
    #     return A_, b, C, f
    # else:
    #     return A_, b, C
    return A_, b, C, f
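
To make the per-pixel problem in the docstring above concrete, here is a minimal sketch of what a single worker does for one pixel under the 'lasso_lars' option: regress the pixel's trace on the temporal traces of the components allowed by its search location, using a nonnegative lasso-LARS fit. The toy data and the penalty weight are assumptions for illustration; the exact weighting inside regression_ipyparallel may differ.

import numpy as np
from sklearn.linear_model import LassoLars

rng = np.random.default_rng(0)
T, n_comp = 500, 3
Cf_px = rng.random((n_comp, T))            # temporal traces allowed at this pixel
a_true = np.array([0.8, 0.0, 0.3])         # toy ground-truth spatial weights
y_px = a_true @ Cf_px + 0.01 * rng.standard_normal(T)  # the pixel's fluorescence trace
sn_px = 0.01                               # noise level of this pixel

# penalty chosen so the residual is on the order of the pixel noise (an assumption)
lam = sn_px * np.sqrt(T) / T
model = LassoLars(alpha=lam, positive=True, fit_intercept=False)
model.fit(Cf_px.T, y_px)
print(model.coef_)                         # sparse, nonnegative weights close to a_true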
Example #60
def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, dist=3,
                              expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int), dview=None):
    """
    restrict search location to subset of pixels

    TODO
    """
    from scipy.ndimage.morphology import grey_dilation
    from scipy.sparse import coo_matrix, issparse, csc_matrix

    if len(dims) == 2:
        d1, d2 = dims
    elif len(dims) == 3:
        d1, d2, d3 = dims

    d, nr = np.shape(A)

    A = csc_matrix(A)

    IND = np.zeros((d, nr), dtype=bool)   # indicator of allowed pixels per component
    if method == 'ellipse':
        Coor = dict()
        if len(dims) == 2:
            Coor['x'] = np.kron(np.ones(d2), list(range(d1)))
            Coor['y'] = np.kron(list(range(d2)), np.ones(d1))
        elif len(dims) == 3:
            Coor['x'] = np.kron(np.ones(d3 * d2), list(range(d1)))
            Coor['y'] = np.kron(np.kron(np.ones(d3), list(range(d2))), np.ones(d1))
            Coor['z'] = np.kron(list(range(d3)), np.ones(d2 * d1))
        if dist != np.inf:             # determine search area for each neuron
            cm = np.zeros((nr, len(dims)))        # vector for center of mass
            Vr = []    # cell(nr,1);
            IND = []       # indicator for distance

            for i, c in enumerate(['x', 'y', 'z'][:len(dims)]):
                cm[:, i] = old_div(np.dot(Coor[c], A[:, :nr].todense()), A[:, :nr].sum(axis=0))

#            for i in range(nr):            # calculation of variance for each component and construction of ellipses
#                dist_cm = coo_matrix(np.hstack([Coor[c].reshape(-1, 1) - cm[i, k]
#                                                for k, c in enumerate(['x', 'y', 'z'][:len(dims)])]))
#                Vr.append(dist_cm.T * spdiags(A[:, i].toarray().squeeze(),
#                                              0, d, d) * dist_cm / A[:, i].sum(axis=0))
#
#                if np.sum(np.isnan(Vr)) > 0:
#                    raise Exception('You cannot pass empty (all zeros) components!')
#
#                D, V = eig(Vr[-1])
#
#                dkk = [np.min((max_size**2, np.max((min_size**2, dd.real)))) for dd in D]
#
#                # search indexes for each component
#                IND.append(np.sqrt(np.sum([(dist_cm * V[:, k])**2 / dkk[k]
#                                           for k in range(len(dkk))], 0)) <= dist)
#            IND = (np.asarray(IND)).squeeze().T
            pars = []
            for i in range(nr):
                pars.append([Coor, cm[i], A[:, i], Vr, dims, dist, max_size, min_size, d])

            if dview is None:
                res = list(map(contruct_ellipse_parallel, pars))
            else:
                res = dview.map_sync(contruct_ellipse_parallel, pars)

            for r in res:
                IND.append(r)

            IND = (np.asarray(IND)).squeeze().T

        else:
            IND = np.ones((d, nr), dtype=bool)
    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:, i].toarray(), dims[::-1])  # , order='F')
            # A_temp = np.reshape(A[:, i].toarray(), (d2, d1))
            if len(expandCore) > 0:
                if len(expandCore.shape) < len(dims):  # default for 3D
                    expandCore = iterate_structure(
                        generate_binary_structure(len(dims), 1), 2).astype(int)
                A_temp = grey_dilation(A_temp, footprint=expandCore)
            else:
                A_temp = grey_dilation(A_temp, [1] * len(dims))

            IND[:, i] = np.squeeze(np.reshape(A_temp, (d, 1))) > 0
    else:
        IND = np.ones((d, nr), dtype=bool)

    return IND
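
A minimal sketch of the 'dilate' branch above: the default expandCore is a 5x5 diamond (a city-block ball of radius 2), and grey-dilating a component's footprint with it marks the neighboring pixels that the spatial update is then allowed to search. Only numpy and scipy are assumed.

import numpy as np
from scipy.ndimage import grey_dilation, iterate_structure, generate_binary_structure

# default expansion kernel: iterate the 4-connected structure twice
expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

A_temp = np.zeros((7, 7))
A_temp[3, 3] = 1.0                        # a single-pixel component footprint
expanded = grey_dilation(A_temp, footprint=expandCore)
print(np.count_nonzero(expanded))         # 13: the diamond of pixels around (3, 3)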