Example #1
def gen_loss_seg(network_eval_batch, label_placeholder):
    # Apply morphological filtering to the label
    filter1 = tf.expand_dims(
        tf.constant(morph.iterate_structure(
            morph.generate_binary_structure(2, 1), 5),
                    dtype=tf.float32), -1)
    seg_morph = tf.nn.dilation2d(
        tf.nn.erosion2d(label_placeholder, filter1, [1, 1, 1, 1], [1, 1, 1, 1],
                        "SAME"), filter1, [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
    filter2 = tf.expand_dims(
        tf.constant(morph.iterate_structure(
            morph.generate_binary_structure(2, 1), 4),
                    dtype=tf.float32), -1)
    seg_morph = tf.nn.erosion2d(
        tf.nn.dilation2d(seg_morph, filter2, [1, 1, 1, 1], [1, 1, 1, 1],
                         "SAME"), filter2, [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
    #seg_morph = label_placeholder

    # Create the 2 bins
    less_g = tf.less(seg_morph, 200)
    greater_g = tf.greater(seg_morph, 99)
    gray = less_g & greater_g
    val_gray = tf.to_float(gray) * 0.5
    white = tf.greater(seg_morph, 199)
    val_white = tf.to_float(white)
    mouse_label = val_gray + val_white
    background_label = tf.to_float(tf.equal(seg_morph, 0.0))
    combined_label = tf.concat([mouse_label, background_label], axis=3)
    flat_combined_label = tf.reshape(combined_label, [-1, 2])
    flat_network_eval = tf.reshape(network_eval_batch, [-1, 2])
    loss = tf.losses.softmax_cross_entropy(flat_combined_label,
                                           flat_network_eval)
    # Could do something fancy with counting TP/FP/TN/FN based on a softmax/argmax between the 2
    errors = None
    return loss, errors
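# For reference, the label binning above can be written directly in NumPy; a
# minimal sketch (the 100/200 gray-value thresholds are read off the TF code,
# assuming integer-valued labels):
import numpy as np

def bin_labels_np(seg_morph):
    """NumPy sketch of the binning: gray pixels (100-199) -> 0.5,
    white pixels (>= 200) -> 1.0, exact zeros -> background."""
    gray = (seg_morph >= 100) & (seg_morph < 200)
    white = seg_morph >= 200
    mouse_label = 0.5 * gray + 1.0 * white
    background_label = (seg_morph == 0).astype(np.float32)
    return mouse_label, background_label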
Example #2
def _get_peaks(spectrogram: np.ndarray) -> list:
    """
    Calculate the local maxima of the spectrogram.

    :param spectrogram: spectrogram of an audio piece;
    :return: list of tuples with frequency and time bin indices of the
        detected local peaks: [(f1, t1), (f2, t2), ...]
    """
    struct = generate_binary_structure(2, 1)
    # dilate the kernel with itself for PEAK_NEIGHB_SIZE iterations;
    # the resulting size of the neighborhood is:
    #   (PEAK_NEIGHB_SIZE * 2 + 1, PEAK_NEIGHB_SIZE * 2 + 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHB_SIZE)

    # finding a local maximum in each neighborhood
    local_max = maximum_filter(spectrogram,
                               footprint=neighborhood) == spectrogram

    magnitude_thresh = np.percentile(spectrogram, MAGNITUDE_PERCENTILE)

    # keep only those local maxima that exceed the magnitude threshold
    peak_cond = local_max & (spectrogram > magnitude_thresh)
    freq_idx, time_idx = np.nonzero(peak_cond)

    return list(zip(freq_idx, time_idx))
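# A possible call site for _get_peaks, assuming the module-level constants and
# scipy imports it relies on; the constant values here are illustrative
# stand-ins, not the snippet's actual settings:
import numpy as np
from scipy.ndimage import maximum_filter, generate_binary_structure, iterate_structure

PEAK_NEIGHB_SIZE = 10        # illustrative value
MAGNITUDE_PERCENTILE = 75    # illustrative value

spectrogram = np.abs(np.random.randn(128, 256))  # stand-in for a real spectrogram
peaks = _get_peaks(spectrogram)
print(peaks[:5])  # [(f1, t1), (f2, t2), ...]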
Example #3
def background_mask(img, seed=0):
    """
    Create a background mask for a given MR image.

    Args:
        img (nibabel.nifti1.Nifti1Image): img from which to extract background
        seed (int): since random sampling used, pick seed for reproducibility

    Returns:
        background (nibabel.nifti1.Nifti1Image): background mask
    """
    np.random.seed(seed)
    logger.info('Finding Background...')
    img_data = img.get_fdata()  # get_data() has been removed from nibabel
    km = KMeans(4)
    rand_mask = np.random.rand(*img_data.shape) > 0.75
    logger.info('Fitting KMeans...')
    km.fit(np.expand_dims(img_data[rand_mask], 1))
    logger.info('Generating Mask...')
    classes = km.predict(np.expand_dims(img_data.flatten(),
                                        1)).reshape(img_data.shape)
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        means = [np.mean(img_data[classes == i]) for i in range(4)]
    raw_mask = classes != np.argmin(means)  # everything outside the lowest-mean class
    filled_raw_mask = __fill_2p5d(raw_mask)
    dist2_5by5_kernel = iterate_structure(generate_binary_structure(3, 1), 2)
    closed_mask = binary_closing(filled_raw_mask, dist2_5by5_kernel, 5)
    filled_closed_mask = __fill_2p5d(
        np.logical_or(closed_mask, filled_raw_mask)).astype(np.float32)
    bg_mask = binary_dilation(filled_closed_mask,
                              generate_binary_structure(3, 1), 2)
    background = nib.Nifti1Image(bg_mask, img.affine, img.header)
    return background
Example #4
    def spectrogram_and_peaks(self, file_path, show_spectrogram=False):
        sample_rate, samples = wavfile.read(file_path)

        arr2D = mlab.specgram(samples,
                              NFFT=4096,
                              Fs=44100,
                              window=mlab.window_hanning,
                              noverlap=(4096 * 0.5))[0]

        arr2D = 10 * np.log10(
            arr2D, out=np.zeros_like(arr2D), where=(arr2D != 0))

        # Create a 2-D binary structure with connectivity 2; roughly speaking,
        # every element is connected to the central element
        struct = generate_binary_structure(2, 2)

        # Replicate the structure by iterating it 10 times
        neighborhood = iterate_structure(struct, 10)

        # find local maxima using our filter mask
        local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

        # Applying erosion, the dejavu documentation does not talk about this step.
        background = (arr2D == 0)
        eroded_background = binary_erosion(background,
                                           structure=neighborhood,
                                           border_value=1)

        # Boolean mask of arr2D with True at peaks (applying XOR on both matrices).
        detected_peaks = local_max != eroded_background

        # extract peaks
        amps = arr2D[detected_peaks]
        freqs, times = np.where(detected_peaks)

        # filter peaks
        # flatten returns the 2-D array as 1-D
        amps = amps.flatten()

        # get indices for frequency and time
        filter_idxs = np.where(amps > 10)

        freqs_filter = freqs[filter_idxs]
        times_filter = times[filter_idxs]

        local_maxima = list(zip(freqs_filter, times_filter))

        if show_spectrogram:
            fig, ax = plt.subplots()
            ax.imshow(arr2D)
            ax.scatter(times_filter, freqs_filter)
            ax.set_xlabel('Time')
            ax.set_ylabel('Frequency')
            ax.set_title("Spectrogram")
            plt.gca().invert_yaxis()
            plt.show()

        os.remove(file_path)

        return self.generate_hashes(local_maxima, 10)
Example #5
    def get_2D_peaks(arr2d, amp_min=threshold):
        # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
        struct = generate_binary_structure(2, 1)
        neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

        # find local maxima using our filter shape
        local_max = maximum_filter(arr2d, footprint=neighborhood) == arr2d
        background = (arr2d <= 40.0)
        eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

        # Boolean mask of specgram with True at peaks
        detected_peaks = local_max ^ eroded_background  # keep this a boolean mask; int subtraction would break the indexing below

        # extract peaks
        amps = arr2d[detected_peaks]
        j, i = np.where(detected_peaks)

        # filter peaks by amplitude
        amps = amps.flatten()
        peaks = zip(i, j, amps)  # (time, freq, amp)

        peaks_filtered = []
        for x in peaks:
            if x[2] > amp_min:
                peaks_filtered.append(x)
            else:
                detected_peaks[x[1]][x[0]] = False

        # get indices for frequency and time
        frequency_idx = [x[1] for x in peaks_filtered]
        time_idx = [x[0] for x in peaks_filtered]

        return list(zip(frequency_idx, time_idx)), detected_peaks
Example #6
def get_peaks(arr2d, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2d, footprint=neighborhood) == arr2d
    background = (arr2d == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks (Fixed deprecated boolean operator by changing '-' to '^')
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2d[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = filter(lambda tup: tup[2] > amp_min,
                            peaks)  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = []
    time_idx = []
    for x in peaks_filtered:
        frequency_idx.append(x[1])
        time_idx.append(x[0])

    return list(zip(frequency_idx, time_idx))
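# A note on the `local_max ^ eroded_background` idiom that recurs in these
# snippets: maximum_filter marks every point of a flat all-zero region as a
# "local maximum", so the eroded background mask is XORed in to strip those
# flat regions back out. A tiny self-contained demonstration:
import numpy as np
from scipy.ndimage import (maximum_filter, binary_erosion,
                           generate_binary_structure, iterate_structure)

arr = np.zeros((9, 9))
arr[4, 4] = 5.0                      # one real peak in a sea of zeros
neighborhood = iterate_structure(generate_binary_structure(2, 1), 2)

local_max = maximum_filter(arr, footprint=neighborhood) == arr
background = (arr == 0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

peaks = local_max ^ eroded_background
print(np.argwhere(peaks))            # only [[4 4]] survives; the flat zeros do not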
Example #7
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

    background = (arr2D == 0)

    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return list(zip(frequency_idx, time_idx))
Example #8
def getPeaks(arr2D, amp_min=DEFAULT_AMP_MIN):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    # Boolean mask of arr2D with True at peaks
    # ^ is bitwise (modulo-2) addition, i.e. XOR
    ##detected_peaks = np.logical_xor(local_max,eroded_background)
    detected_peaks = local_max ^ eroded_background
    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)
    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp
    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]
    print("Peakler bulundu")
    return frequency_idx, time_idx
Example #9
def get_2D_peaks(spectrogram, amp_min=20, max_neighbors=20):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, max_neighbors)
    detected_peaks = (maximum_filter(spectrogram, footprint=neighborhood) == spectrogram) * (spectrogram > amp_min)
    freqs, times = np.where(detected_peaks)

    return list(zip(freqs, times))
Example #10
def spectrogram_to_peaks(specgram, freqs, times):
    """
    Parameters
    ----------
    specgram
        The M x N numpy array where axis zero corresponds to times while
        axis one corresponds to C_k.
    freqs
        Array of frequencies.
    times
        Array of times, where each time value corresponds to the middle of
        the time bin.

    Returns
    -------
    peaks
        An M x N boolean numpy array that is True at local maxima whose C_k
        values exceed the 90th-percentile background threshold.
    """
    fp = generate_binary_structure(2, 1)
    fp = iterate_structure(fp, 20)

    background_threshold = find_ninety_C_k(specgram)

    peaks = ((specgram == maximum_filter(specgram, footprint=fp)) &
             (specgram > background_threshold))

    return peaks
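# find_ninety_C_k is defined elsewhere; judging by its use as the background
# threshold and the docstring's mention of the 90th percentile, a plausible
# stand-in (hypothetical, not the original implementation) would be:
import numpy as np

def find_ninety_C_k(specgram):
    # hypothetical: the 90th percentile of the |C_k| magnitudes,
    # used as the background/foreground cutoff
    return np.percentile(specgram, 90)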
Example #11
    def get_peaks_above_threshold(self, spectrum, threshold):

        #print ("    Finding peaks...")
        # Generate a binary structure for binary morphological operations.
        struct = morphology.generate_binary_structure(2, 1)
        neighborhood = morphology.iterate_structure(
            struct, RunParams.Default_Peak_Neighborhood_Size)

        # find peaks using maximum filter
        peaks = filters.maximum_filter(spectrum,
                                       footprint=neighborhood) == spectrum
        background = (spectrum == 0)
        eroded_background = morphology.binary_erosion(background,
                                                      structure=neighborhood,
                                                      border_value=1)

        # Boolean mask of spectrum with True at peaks
        peaks = peaks ^ eroded_background  # '^' instead of the deprecated boolean '-'

        #print ("    Peaks extrated.")

        # extract peaks
        peaks_extracted = spectrum[peaks]
        j, i = np.where(peaks)

        # create a list of tuple (freq, time, amp)
        p = zip(i, j, peaks_extracted.flatten())
        peaks_filtered = [x for x in p if x[2] > threshold]

        #print ("    Number of peaks:", len(peaks_filtered))
        # get indices for frequency and time
        frequency_idx = [x[1] for x in peaks_filtered]
        time_idx = [x[0] for x in peaks_filtered]

        return peaks_filtered, time_idx, frequency_idx
Example #12
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

    background = (arr2D == 0)

    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return list(zip(frequency_idx, time_idx))
Example #13
def get_amplitudes(array, plot=False, amp_minimum=MIN_AMP):
    structure = generate_binary_structure(2, 1)
    surroundings = iterate_structure(structure, PEAK_CELLS_SIZE)

    local_max = maximum_filter(array, footprint=surroundings) == array

    background = (array == 0)
    eroded_b = binary_erosion(background,
                              structure=surroundings,
                              border_value=1)

    # Getting the peaks or points in the spectrogram with the highest amplitudes
    found_peaks = local_max ^ eroded_b

    highest_amps = array[found_peaks]
    j, i = np.where(found_peaks)

    # filtering highest points in specgram
    highest_amps = highest_amps.flatten()
    points = zip(i, j, highest_amps)
    new_points = [x for x in points if x[2] > amp_minimum]

    # find frequency and time (each point is a (time, freq, amplitude) tuple)
    freq_loc = [x[1] for x in new_points]
    time_loc = [x[0] for x in new_points]

    # In the array of new peaks, time, frequency, and amplitude are stored in
    # the first three indices of each tuple

    # NOTE: the `plot` argument is accepted, but plotting is not implemented here
    return list(zip(freq_loc, time_loc))
Example #14
def get_2D_peaks(arr2D, amp_min=DEFAULT_AMP_MIN):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, CONNECTIVITY_MASK)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    try:
        time_idx, frequency_idx, amps_filtered = zip(
            *filter(lambda x: x[2] > amp_min, peaks))
    except ValueError:
        return []
    return list(zip(frequency_idx, time_idx))
Example #15
def get_peaks(spec, peak_nhood_size=15, amp_min=10, plot=False):
    struct = generate_binary_structure(rank=2, connectivity=2)  # square mask
    neighborhood = iterate_structure(struct, iterations=peak_nhood_size)
    local_max = maximum_filter(spec, footprint=neighborhood) == spec
    background = (spec == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    detected_peaks = local_max != eroded_background
    amps = spec[detected_peaks]
    freqs, times = np.where(detected_peaks)
    amps = amps.flatten()
    filter_idxs = np.where(amps > amp_min)
    freqs_filter = freqs[filter_idxs]
    times_filter = times[filter_idxs]

    if plot:
        fig, ax = plt.subplots()
        ax.imshow(spec,
                  origin="lower",
                  aspect="auto",
                  cmap="jet",
                  interpolation="none")
        ax.scatter(times_filter, freqs_filter, s=1)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.show()

    return list(zip(freqs_filter, times_filter))
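# A hedged usage sketch: get_peaks expects a 2-D spectrogram in which silence
# shows up as zeros. One way to produce such an input is scipy.signal.spectrogram
# (the front end actually used by this snippet is not shown, and the amp_min
# value below is illustrative only):
import numpy as np
from scipy.signal import spectrogram

fs = 44100
t = np.arange(fs) / fs
samples = np.sin(2 * np.pi * 440 * t)            # 1 s of a 440 Hz tone
freqs, times, spec = spectrogram(samples, fs=fs, nperseg=4096)

peaks = get_peaks(spec, peak_nhood_size=15, amp_min=1e-6)
print(len(peaks), "peaks found")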
Example #16
def peaks(spectrogram_arr):
    """
    This finds the peaks of a spectrogram.
    :param:
    spectrogram_arr[np.array]:
    An array of the spectrogram.
    The 2D array of the coefficients of the DFT of the song. So S[i, j] is the coefficient at frequency = freq[i]
    and time = time[j].
    :return:
    peaks[np.array]:
    This is an array with the values of the peaks in the respective
    areas that are there.
    """
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 10)

    is_peaks = spectrogram_arr == maximum_filter(spectrogram_arr,
                                                 footprint=neighborhood)
    ys, xs = np.histogram(spectrogram_arr.flatten(),
                          bins=spectrogram_arr.size // 2,
                          density=True)
    dx = xs[-1] - xs[-2]
    cdf = np.cumsum(
        ys) * dx  # this gives you the cumulative distribution of amplitudes
    cutoff = xs[np.searchsorted(cdf, 0.77)]
    foreground = (spectrogram_arr >= cutoff)
    # fig, (ax1, ax2) = plt.subplots(1, 2)
    # ax2.imshow(foreground)
    # ax1.imshow(np.logical_and(foreground, is_peaks))
    return np.logical_and(foreground, is_peaks)
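# The histogram/cumsum/searchsorted dance above is just an empirical-percentile
# computation: integrating the normalized histogram up to 0.77 picks (up to
# binning error) the 77th-percentile amplitude. A shorter equivalent:
import numpy as np

spectrogram_arr = np.random.rand(64, 64) ** 3   # toy data, skewed like log amplitudes
cutoff = np.percentile(spectrogram_arr, 77)     # ~ the histogram-based cutoff value
foreground = spectrogram_arr >= cutoff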
Example #17
    def get_2D_peaks(self, arr2D, amp_min=10):
        struct = generate_binary_structure(2, 1)
        neighborhood = iterate_structure(struct, 20)
        # find local maxima using our filter shape
        local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
        background = (arr2D == 0)
        eroded_background = binary_erosion(background,
                                           structure=neighborhood,
                                           border_value=1)
        # Boolean mask of arr2D with True at peaks (fixed deprecated boolean operator by changing '-' to '^')
        detected_peaks = local_max ^ eroded_background
        # extract peaks
        amps = arr2D[detected_peaks]
        j, i = np.where(detected_peaks)
        # filter peaks
        amps = amps.flatten()
        peaks = zip(i, j, amps)
        peaks_filtered = filter(lambda x: x[2] > amp_min,
                                peaks)  # freq, time, amp
        # get indices for frequency and time
        frequency_idx = []
        time_idx = []
        for x in peaks_filtered:
            frequency_idx.append(x[1])
            time_idx.append(x[0])
        # scatter of the peaks
        fig, ax = pylab.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        pylab.gca().invert_yaxis()
        pylab.savefig('spectrogramPeaks.png',
                      bbox_inches='tight',
                      pad_inches=0)
Example #18
def get_2D_peaks(array2D):
    # This function is based on the function 'get_2D_peaks()' available at the URL below.
    # https://github.com/worldveil/dejavu/blob/master/dejavu/fingerprint.py
    # Copyright (c) 2013 Will Drevo, use permitted under the terms of the open-source MIT License.

    # Create a filter to extract peaks from the image data.
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 25)

    # Find local maxima using our filter shape. These are boolean arrays.
    local_maxima = maximum_filter(array2D, footprint=neighborhood) == array2D
    background = (array2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of array2D with True at peaks.
    detected_peaks = local_maxima ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # Extract peak amplitudes and locations.
    amps = array2D[detected_peaks]
    j, i = numpy.where(detected_peaks)

    # Filter peaks for those exceeding the minimum amplitude.
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]

    # Get frequency and time at peaks.
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return (frequency_idx, time_idx)
Example #19
def peak_find(samples, prnt_spectro=False):
    '''
    Finds peak values of given sample data

    Parameters
    -----------
    samples: ndarray
        song sample data for peaks to be found in
    prnt_spectro: boolean (default = False)
        whether a spectrogram of the song should be shown on screen

    Returns
    --------
    times: ndarray
        indices of peak times
    freqs: ndarray
        indices of peak frequencies
    '''
    s, f, t = make_spectrogram(samples, prnt_spectro)

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 20)

    ys, xs = np.histogram(s.flatten(), bins=len(f) * len(t) // 2, density=True)
    dx = xs[-1] - xs[-2]
    cdf = np.cumsum(ys) * dx
    cutoff = xs[np.searchsorted(cdf, 0.77)]

    output = np.logical_and(s >= cutoff, s == maximum_filter(s, footprint=neighborhood)).T
    tf = np.where(output)
    return tf[0], tf[1]
Example #20
def find_peaks(spec):
    """Finds peaks of audio object spectrogram."""

    peak_neighbourhood_size = 15
    min_amp = 10

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, peak_neighbourhood_size)

    # find local maxima using filter shape
    local_max = maximum_filter(spec, footprint=neighborhood) == spec
    background = (spec == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # boolean mask with True at peaks
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = spec[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks so amplitude is greater than min_amp
    amps = amps.flatten()
    peaks = zip(i, j, amps)

    # freq, time, amp
    peaks_filtered = [x for x in peaks if x[2] > min_amp]

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return (frequency_idx, time_idx)
Example #21
def peak_find(samples):
    '''
    Finds peak values of given sample data

    Parameter(s)
    -------------
    samples: song sample data for peaks to be found in.

    Returns
    -------------
    Boolean np.ndarray with true values at peaks, false values at non-peaks

    '''
    spectro_output = make_spectrogram(samples)
    s = spectro_output['spectro']

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 20)

    ys, xs = np.histogram(s.flatten(),
                          bins=len(spectro_output['f']) // 2,
                          density=True)
    dx = xs[-1] - xs[-2]
    cdf = np.cumsum(ys) * dx
    cutoff = xs[np.searchsorted(cdf, 0.77)]

    output = np.logical_and(s >= cutoff,
                            s == maximum_filter(s, footprint=neighborhood)).T
    return {
        'indices': np.where(output),
        'f': spectro_output['f'],
        't': spectro_output['t']
    }
Example #22
def plotPeaks(arr2D, amp_min=DEFAULT_AMP_MIN):
	# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
	struct = generate_binary_structure(2, 1)
	neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

	# find local maxima using our filter shape
	local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
	background = (arr2D == 0)
	eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

	# Boolean mask of arr2D with True at peaks
	detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

	# extract peaks
	amps = arr2D[detected_peaks]
	j, i = np.where(detected_peaks)

	# filter peaks
	amps = amps.flatten()
	peaks = zip(i, j, amps)
	peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

	# get indices for frequency and time
	frequency_idx = [x[1] for x in peaks_filtered]
	time_idx = [x[0] for x in peaks_filtered]

	# scatter of the peaks
	fig, ax = plt.subplots()
	ax.imshow(arr2D)
	ax.scatter(time_idx, frequency_idx)
	ax.set_xlabel('Time')
	ax.set_ylabel('Frequency')
	ax.set_title("Spectrogram")
	plt.gca().invert_yaxis()
	plt.show()
Example #23
def get2DPeaks(arr2D):
    """
        Generates peaks of a spectogram.
        Args:
            arr2D: spectogram.
        Returns:
            List of pairs (time, frequency) of peaks.
    """

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return list(zip(frequency_idx, time_idx))
Example #24
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = arr2D == 0
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return list(zip(frequency_idx, time_idx))
Example #25
def find_peaks(song, freqs):
    """
    Find the peaks in the two-dimensional array that describes a song
    Parameters:
    ----------
    song: numpy.ndarray (MxN)
        the two dimensional array of Fourier-constants describing the song
        song[i,j] is the magnitude of the Fourier-constant for frequency i at time j
    Returns:
    --------
    peaks: binary array (MxN)
        the binaray "mask" that identifies the locations of peaks
        peaks[i,j] is True if there is a local peak for frequency i at time j 
    """
    #generates proper neighborhood
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 25)  # a 25-step diamond neighborhood
    #finds foreground
    ys, xs = np.histogram(song.flatten(), bins=len(freqs)//2, density=True)
    dx = xs[-1] - xs[-2]
    cdf = np.cumsum(ys)*dx  # this gives you the cumulative distribution of amplitudes
    cutoff = xs[np.searchsorted(cdf, 0.77)]
    foreground = (song >= cutoff)
    #generates boolean array of peaks that are both peaks and in the foreground
    peaks = np.logical_and((song == maximum_filter(song, footprint=neighborhood)), foreground)
    return peaks
Example #26
# NOTE: amp_min was referenced below but never defined; it is exposed here as a parameter
def get_2D_peaks(arr2D, plot=False, amp_min=10):

  # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
  struct = generate_binary_structure(2, 1)
  peak_neigh_size = 20
  neighborhood = iterate_structure(struct, peak_neigh_size) 

  # find local maxima using our filter shape
  local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D 
  background = (arr2D == 0)
  eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
  detected_peaks = local_max ^ eroded_background # this is a boolean mask of arr2D with True at peaks

  # extract peaks
  amps = arr2D[detected_peaks]
  j, i = np.where(detected_peaks) 

  # filter peaks
  amps = amps.flatten()
  peaks = zip(i, j, amps)
  peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
  
  # get indices for frequency and time
  frequency_idx = [x[1] for x in peaks_filtered]
  time_idx = [x[0] for x in peaks_filtered]

  return list(zip(frequency_idx, time_idx))
Example #27
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    detected_peaks = local_max ^ eroded_background
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # use the amp_min parameter rather than a hard-coded 10
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]
    # scatter of the peaks
    if plot:
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()
    return list(zip(frequency_idx, time_idx))
Example #28
    def get_2D_peaks(self, arr2D, plot=False, min_amp=DEFAULT_MIN_AMP, verbose=False):
        struct = generate_binary_structure(2, 1)
        neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

        # find local maxima
        localmax = maximum_filter(arr2D, footprint=neighborhood) == arr2D
        background = (arr2D == 0)

        eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

        # boolean mask of 2d array with True peaks
        detected_peaks = localmax ^ eroded_background

        # extract peaks
        amps = arr2D[detected_peaks]
        j, i = np.where(detected_peaks)  # j: frequency rows, i: time columns

        # filter peaks
        amps = amps.flatten()

        # stores information in a dictionary // used by audio similarity
        # if store_data:
        #     self.set_data(i, j, amps)

        peaks = zip(i, j, amps)  # (time, freq, amp)

        # only consider peaks above a specific amplitude
        peaks_filtered = [x for x in peaks if x[2] > min_amp]

        # get idx for freq and time (x[1] is the frequency row, x[0] the time column)
        freq_idx = [x[1] for x in peaks_filtered]
        time_idx = [x[0] for x in peaks_filtered]

        if verbose:
            print('FINGERPRINTER DETAILS ***********')
            print('Number of peak idx: ', len(freq_idx))
            print('Number of time idx: ', len(time_idx))
            print('Length of segment:',
                  round(len(arr2D[1]) / DEFAULT_FREQ * DEFAULT_WINDOW_SIZE * DEFAULT_OVERLAP_RATIO, 5), 'seconds')

        if plot:
            print('Plotting spectrogram!')
            fig, ax = plt.subplots()
            ax.imshow(arr2D, cmap='gnuplot')
            ax.scatter(time_idx, freq_idx)
            ax.set_xlabel('Time')
            ax.set_ylabel('Frequency')
            ax.set_title('Spectrogram')
            ax.set_aspect('auto', adjustable='box')
            plt.gca().invert_yaxis()
            plt.show()
            plt.close()

            print('Plotting peaks!')
            plt.scatter(time_idx, freq_idx)
            plt.grid(True)
            plt.show()
        # python 2 would cast to a list when using zip, py3 does not
        return list(zip(freq_idx, time_idx))
Example #29
def determine_search_location_per_channel(A, d1, d2, nr, method='ellipse',
                                          min_size=3, max_size=8, dist=3,
                                          expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int)):

    IND = np.array([]).reshape((d1*d2,0))

    if type(method) == str:
        method = [method]

    for ni in range(len(np.unique(nr))):
        # NOTE: `cm` (the component centroids) is assumed to be defined in the
        # enclosing scope; it is never passed in explicitly.
        IND_ch = determine_search_location(A[:, comp_idx(nr, ni)], d1, d2,
                                           cm[comp_idx(nr, ni), :], method=method[ni],
                                           min_size=min_size, max_size=max_size, dist=dist,
                                           expandCore=expandCore)

        IND = np.hstack([IND, IND_ch])

    return IND
Example #30
def getConstellationMap(spectrum,
                        plot=False,
                        min_peak_amp=FPconfig.minimun_peak_amplitude):
    """

    :param spectrum: the array of spectrum in log space (from getSpecgramArr)
    :param plot: if show the plot
    :param min_peak_amp: the minimum value to regard as peak
    :return: 2-d array of peaks [(x1,y1),(x2,y2),.......]
    """

    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, FPconfig.peak_neighborhood_params)

    # find local maxima using our filter shape
    local_max = maximum_filter(spectrum, footprint=neighborhood) == spectrum
    background = (spectrum == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of the spectrum with True at peaks
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = spectrum[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks
                      if x[2] > min_peak_amp]  # time, freq, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]
    #print(max(time_idx))

    if plot:
        # scatter of the peaks

        fig, ax = plt.subplots()
        ax.imshow(spectrum)
        ax.scatter(time_idx, frequency_idx, marker=".")
        ax.set_xlabel('Time[sec]')
        ax.set_ylabel('Frequency[kHz]')
        ax.set_title("Spectrogram of Kamini from Anugraheethan Antony")
        plt.gca().invert_yaxis()
        plt.savefig("ceshi.jpg")
        #plt.xlim(200, 800)
        plt.xlim(0, 250)
        plt.ylim(0, 250)
        plt.show()

    return list(zip(time_idx, frequency_idx))
Example #31
def get_2D_peaks_laplacian_2d(spectrogram, c=0.2, amp_min=20, max_neighbors=20):
    det = spectrogram - c * scipy.signal.convolve2d(
        spectrogram, [[0, -1, 0], [-1, 2, -1], [0, -1, 0]], mode='same')

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, max_neighbors)
    detected_peaks = (maximum_filter(det, footprint=neighborhood) == det) * (det > amp_min)
    freqs, times = np.where(detected_peaks)

    return freqs, times
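# A note on the kernel in Example #31: with a center weight of 2 (rather than
# the textbook 4 of the discrete Laplacian), `det` reduces to a simple blend of
# each point with its four neighbors, det = (1 - 2c)*s + c*(sum of neighbors)
# at interior points. A quick self-check of that algebra:
import numpy as np
import scipy.signal

s = np.random.rand(16, 16)
c = 0.2
kernel = [[0, -1, 0], [-1, 2, -1], [0, -1, 0]]
det = s - c * scipy.signal.convolve2d(s, kernel, mode='same')

direct = (1 - 2 * c) * s
direct[1:-1, 1:-1] += c * (s[:-2, 1:-1] + s[2:, 1:-1] + s[1:-1, :-2] + s[1:-1, 2:])
assert np.allclose(det[1:-1, 1:-1], direct[1:-1, 1:-1])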
Example #32
    def get_contact_areas(self):
        """
        
        :return: 
        """
        if not self.aa_grid:
            self.get_aa_grid()

        vdw_array = self.aa_grid.get_array()
        res_names = [res.identifier for res in self.protein.residues]
        res_nums = [idx + 1 for (idx, val) in enumerate(res_names)]

        #Not contact dict -> directly fill in the matrix. See how slow it is later.
        cad_matrix = np.zeros((len(res_nums), len(res_nums)))

        struct_element = morphology.generate_binary_structure(rank=3,
                                                              connectivity=2)
        small_struct_element = morphology.generate_binary_structure(3, 2)
        large_struct_element = morphology.iterate_structure(
            structure=struct_element, iterations=2)
        print(large_struct_element)

        for r in res_nums:
            loc_arr = np.zeros(vdw_array.shape)
            loc_arr[np.where(vdw_array == r)] = 1
            dil = morphology.binary_dilation(loc_arr,
                                             large_struct_element,
                                             iterations=2)
            contacts = dil * vdw_array
            eroded_contacts = morphology.binary_erosion(
                dil, small_struct_element)
            eroded_contacts = np.abs(eroded_contacts - 1)
            contacts = contacts * eroded_contacts
            near_res = list(set(contacts[contacts.nonzero()]))

            print(r, near_res)
            for n in near_res:
                if int(n) != int(r):
                    cad_matrix[int(r - 1), int(n - 1)] = len(
                        contacts[np.where(contacts == n)])
                    # idx = np.where(contacts==n)
                    # if np.array(zip(*idx)).shape[0] >=4:
                    #     hull = ConvexHull(np.array(zip(*idx)))
                    #     cad_matrix[int(r - 1), int(n - 1)] = hull.area
                    # else:
                    #     cad_matrix[int(r - 1), int(n - 1)] ==1.0
            if r == 1 or r == 50:
                g = CADCalculator.from_array(contacts,
                                             self.aa_grid.bounding_box[0],
                                             self.aa_grid.bounding_box[1])
                g.write("contacts_g_{}_acceptor_ranges.ccp4".format(
                    self.protein.residues[r - 1]))

        self.CAD_matrix = cad_matrix
        fname = self.prot_path.replace(".pdb", "_CAD.txt")
        np.savetxt(fname, cad_matrix)
Example #33
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
 
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local peaks
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = list(zip(i, j, amps))
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get the time and frequency indices
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)  # draw the peaks as scatter points
        ax.set_xlabel('Time')  # x-axis
        ax.set_ylabel('Frequency')  # y-axis
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    peaklist = list(zip(frequency_idx, time_idx))
    h = []

    for i in range(len(peaklist)):
        for j in range(1, DEFAULT_FAN_VALUE):  # limit the pairing range
            if (i + j) < len(peaklist):
                
                freq1 = peaklist[i][IDX_FREQ_I]
                freq2 = peaklist[i + j][IDX_FREQ_I]
                t1 = peaklist[i][IDX_TIME_J]
                t2 = peaklist[i + j][IDX_TIME_J]
                t_delta = t2 - t1

                if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
                    stmp = "%s|%s|%s" % (str(freq1), str(freq2), str(t_delta))
                    # join the first peak's frequency, the second peak's frequency, and
                    # the time delta between them into a string, then hash it
                    h.append((hashlib.sha1(stmp.encode('utf-8')).hexdigest().upper(), t1))

    return h
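# Example #33 leans on several module constants that are not shown. For
# reference, the defaults in the dejavu project this code descends from are
# listed below; this snippet's actual values may differ:
IDX_FREQ_I = 0             # index of the frequency value in a peak tuple
IDX_TIME_J = 1             # index of the time value in a peak tuple
DEFAULT_FAN_VALUE = 15     # how many subsequent peaks each peak is paired with
MIN_HASH_TIME_DELTA = 0    # discard pairs closer together in time than this
MAX_HASH_TIME_DELTA = 200  # discard pairs farther apart in time than this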
Example #34
def local_peaks(log_spectrogram, amp_min, p_nn):
    """
    Defines a local neighborhood and finds the local peaks
    in the spectrogram, which must be larger than the
    specified `amp_min`.

    Parameters
    ----------
    log_spectrogram : numpy.ndarray, shape=(n_freq, n_time)
        Log-scaled spectrogram. Columns are the periodograms of
        successive segments of a frequency-time spectrum.

    amp_min : float
        Amplitude threshold applied to local maxima

    p_nn : int
        Number of cells around an amplitude peak in the spectrogram in order
        for it to be considered a local peak.

    Returns
    -------
    List[Tuple[int, int]]
        Time and frequency index-values of the local peaks in spectrogram.
        Sorted by ascending frequency and then time.

    Notes
    -----
    The local peaks are returned in column-major order for the spectrogram.
    That is, the peaks are ordered by time. That is, we look for nearest
    neighbors of increasing frequencies at the same times, and then move to
    the next time bin.
    """
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, p_nn)
    rows, cols = np.where(neighborhood)
    assert neighborhood.shape[0] % 2 == 1
    assert neighborhood.shape[1] % 2 == 1

    # center neighborhood indices around center of neighborhood
    rows -= neighborhood.shape[0] // 2
    cols -= neighborhood.shape[1] // 2

    detected_peaks = _peaks(log_spectrogram, rows, cols, amp_min=amp_min)

    # Extract peaks; encoded in terms of time and freq bin indices.
    # dt and df are always the same size for the spectrogram that is produced,
    # so the bin indices consistently map to the same physical units:
    # t_n = n*dt, f_m = m*df (m and n are integer indices)
    # Thus we can codify our peaks with integer bin indices instead of their
    # physical (t, f) coordinates. This makes storage and compression of peak
    # locations much simpler.

    return detected_peaks
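# _peaks is an external helper (a compiled/JIT routine in the original project).
# A plausible pure-Python stand-in with the same interface, for illustration
# only (hypothetical, and far slower than the real thing):
import numpy as np

def _peaks(log_spectrogram, rows, cols, amp_min):
    # (r, c) is a peak if it exceeds amp_min and is >= every in-bounds
    # neighbor at the centered offsets given by (rows, cols)
    n_r, n_c = log_spectrogram.shape
    out = []
    for r in range(n_r):
        for c in range(n_c):
            v = log_spectrogram[r, c]
            if v <= amp_min:
                continue
            nr, nc = r + rows, c + cols
            ok = (nr >= 0) & (nr < n_r) & (nc >= 0) & (nc < n_c)
            if np.all(v >= log_spectrogram[nr[ok], nc[ok]]):
                out.append((r, c))
    return out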
Example #35
def detect_peaks(arr2D, amp_min=-30, plot=False):
    """
    Takes a spectrogram and detects the peaks using the local maximum filter
    Parameters:
        amp_min - minimum amplitude of the frequency value
                    to be considered a peak (in dB)

    Returns a list of (n, k) coordinates of the peaks, where
    n is the frequency and k is the time index
    """
    # define a 4-connected (diamond) neighborhood
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 20)

    # apply the local maximum filter; all pixels of maximal value
    # in their neighborhood are set to 1
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    # final mask
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    frequency_idx = []
    time_idx = []
    for stamp in [x for x in peaks if x[2] > amp_min]:
        frequency_idx.append(stamp[1])
        time_idx.append(stamp[0])

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx, c='red', marker='x')
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return list(zip(frequency_idx, time_idx))
Example #36
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN, freqs=None, times=None):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = arr2D == 0
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.set_autoscalex_on(True)
        # 1. need to set 'origin', otherwise, image is upside-down
        # 2. in order to fit image to screen, set 'extent' and 'aspect'
        ax.imshow(
            arr2D,
            origin="lower",
            extent=[times[0], times[-1], freqs[0], freqs[-1]],
            interpolation="nearest",
            aspect="auto",
        )
        # mapping to right value, instead of just index
        ax.scatter(times.take(time_idx), freqs.take(frequency_idx))
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        ax.set_title("Spectrogram")
        plt.show()

    return list(zip(frequency_idx, time_idx))
Example #37
def get_peaks(spectrogram, plot=False):
    """Gets all the peaks from this spectrogram.
    """
    # generate the filter pattern (neighborhoods)
    peak_filter = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(peak_filter,
                                     NEIGHBORHOOD_SIZE).astype(int)

    # set each point equal to the maximum in its neighborhood
    local_max = maximum_filter(spectrogram, footprint=neighborhood)

    # check where the 'local max' is equal to our original values.
    # these are our peaks.
    peaks = local_max == spectrogram

    # filter out background around the peaks:
    # http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/
    # scipy.ndimage.morphology.binary_erosion.html
    background = (spectrogram == 0)
    eroded = binary_erosion(background,
                            structure=neighborhood,
                            border_value=1)
    actual_peaks = peaks ^ eroded  # '^' instead of the deprecated boolean '-'

    # problem here is that we see lots of peaks in LOW areas. Let's
    # filter out the low ones.
    amplitudes = spectrogram[actual_peaks].flatten()
    y, x = actual_peaks.nonzero()  # drop the bogus .astype(type) cast
    all_peaks = zip(x, y, amplitudes)

    filtered_peaks = [p for p in all_peaks if p[2] > AMPLITUDE_THRESHOLD]

    if plot:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)

        fingerprints = list(zip(*filtered_peaks))  # materialize; zip objects are not subscriptable
        x, y = fingerprints[0], fingerprints[1]
        ax1.pcolor(spectrogram)
        ax1.scatter(x, y, c='blue')
        plt.show()

    return filtered_peaks
Example #38
def get_2D_peaks(arr2D, config):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, config["fingerprint"]["neighborhood_size"])

    # find local maxima using our filter shape
    log.debug('fingerprint local maxima start')
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)

    log.debug('fingerprint binary_erosion start')
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' instead of the deprecated boolean '-'

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    log.debug('fingerprint filter peaks start')
    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > config["fingerprint"]["amp_min"]]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if config["fingerprint"]["plot"]:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return list(zip(frequency_idx, time_idx))
Example #39
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    #  http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.iterate_structure.html#scipy.ndimage.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks (Fixed deprecated boolean operator by changing '-' to '^')
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = filter(lambda x: x[2]>amp_min, peaks) # freq, time, amp
    # get indices for frequency and time
    frequency_idx = []
    time_idx = []
    for x in peaks_filtered:
        frequency_idx.append(x[1])
        time_idx.append(x[0])
    
    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return list(zip(frequency_idx, time_idx))
Example #40
def mergeROIS(Y_res,A,b,C,f,S,d1,d2,P_,thr=0.85,mx=50,sn=None,deconv_method='spgl1',min_size=3,max_size=8,dist=3,method_exp = 'ellipse', expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int)):
    """
    merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     Inputs:
     Y_res:        np.ndarray 
            residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
     A:     sparse matrix
                matrix of spatial components (d x K)
     b:     np.ndarray
                spatial background (vector of length d)
     C:     np.ndarray
                matrix of temporal components (K x T)
     f:     np.ndarray
                temporal background (vector of length T)
     P_:     struct
                structure with neuron parameters
     S:     np.ndarray            
                matrix of deconvolved activity (spikes) (K x T)
     thr:   scalar between 0 and 1
                correlation threshold for merging (default 0.85)
     mx:    int
                maximum number of merging operations (default 50)
     sn:    nd.array
                noise level for each pixel (vector of length d)
    
    Outputs:
     A:     sparse matrix
                matrix of merged spatial components (d x K)
     C:     np.ndarray
                matrix of merged temporal components (K x T)
     nr:    int
            number of components after merging
     P_:     struct
                structure with new neuron parameters
     S:     np.ndarray            
                matrix of merged deconvolved activity (spikes) (K x T)
    
    % Written by:
    % Andrea Giovannucci from implementation of Eftychios A. Pnevmatikakis, Simons Foundation, 2015
    """
    
#%
    
    nr = A.shape[1]
    [d,T] = np.shape(Y_res)
    C_corr = np.corrcoef(C[:nr,:],C[:nr,:])[:nr,:nr];
    FF1=C_corr>=thr; #find graph of strongly correlated temporal components 
    A_corr=A.T*A
    A_corr.setdiag(0)
    FF2=A_corr>0            # % find graph of overlapping spatial components
    FF3=np.logical_and(FF1,FF2.todense())
    FF3=coo_matrix(FF3)
    c,l=csgraph.connected_components(FF3) # % extract connected components
    
    p=len(P_[0]['gn'])
    MC=[];
    for i in range(c):     
        if np.sum(l==i)>1:
            MC.append((l==i).T)
    MC=np.asarray(MC).T
    
    if MC.ndim>1:
        cor = np.zeros((np.shape(MC)[1],1));
        
            
        for i in range(np.size(cor)):
            fm = np.where(MC[:,i])[0]
            for j1 in range(np.size(fm)):        
                for j2 in range(j1+1,np.size(fm)):
                    print(j1, j2)
                    cor[i] = cor[i] +C_corr[fm[j1],fm[j2]]
        
        
        Y_res = Y_res + np.dot(b,f);
        if np.size(cor) > 1:
            ind=np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]
    
        nm = min((np.size(ind),mx))   # number of merging operations
    
        A_merged = lil_matrix((d,nm));
        C_merged = np.zeros((nm,T));
        S_merged = np.zeros((nm,T));
        
        P_merged=[];
        merged_ROIs = []
    #%
        for i in range(nm):
            P_cycle=dict()
            merged_ROI=np.where(MC[:,ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI,:]**2,axis=1))
    #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))    
            A_merged[:,i] = lil_matrix((A[:,merged_ROI]*scipy.sparse.diags(nC,0,(len(nC),len(nC)))).sum(axis=1))
    
            Y_res = Y_res + A[:,merged_ROI]*C[merged_ROI,:]
            
            aa_1=scipy.sparse.linalg.spsolve(scipy.sparse.diags(nC,0,(len(nC),len(nC))),csc_matrix(C[merged_ROI,:]))
            aa_2=(aa_1).mean(axis=0)        
            
            ff = np.nonzero(A_merged[:,i])[0]     
            
            cc,_,_,Ptemp,_ = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,method=deconv_method)  
            
            aa,bb,cc = update_spatial_components(np.asarray(Y_res),cc,f,A_merged[:,i],d1=d1,d2=d2,sn=sn,min_size=min_size,max_size=max_size,dist=dist,method = method_exp, expandCore =expandCore)
    
            A_merged[:,i] = aa.tocsr();        
    
            cc,_,_,Ptemp,ss = update_temporal_components(Y_res[ff,:],A_merged[ff,i],bb[ff],cc,f,p=p,method=deconv_method)
            
            P_cycle=P_[merged_ROI[0]].copy()
            P_cycle['gn']=Ptemp[0]['gn']
            P_cycle['b']=Ptemp[0]['b']
            P_cycle['c1']=Ptemp[0]['c1']
            P_cycle['neuron_sn']=Ptemp[0]['neuron_sn']
            P_merged.append(P_cycle)
            C_merged[i,:] = cc
            S_merged[i,:] = ss
            
            if i+1 < nm:
                Y_res[ff,:] = Y_res[ff,:] - A_merged[ff,i]*cc
                
        #%
        neur_id = np.unique(np.hstack(merged_ROIs))
                
        good_neurons=np.setdiff1d(range(nr),neur_id)    
        
        A = scipy.sparse.hstack((A[:,good_neurons],A_merged.tocsc()))
        C = np.vstack((C[good_neurons,:],C_merged))
        S = np.vstack((S[good_neurons,:],S_merged))
    #    P_new=list(P_[good_neurons].copy())
        P_new=[P_[pp] for pp in good_neurons]
        
        for p in P_merged:
            P_new.append(p)
    
        nr = nr - len(neur_id) + nm
    
    else:
        warnings.warn('No neurons merged!')
        merged_ROIs=[];
        P_new=P_
        
    return A,C,nr,merged_ROIs,P_new,S
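A minimal, self-contained sketch of the merging criterion above (toy inputs, not the library's mergeROIS; shapes assumed as pixels x K for A and K x T for C): components land in the same merge group when their temporal traces correlate above thr AND their spatial footprints overlap.

import numpy as np
from scipy.sparse import coo_matrix, csgraph

def find_merge_groups(A, C, thr=0.8):
    """A: (pixels x K) sparse footprints, C: (K x T) temporal traces."""
    C_corr = np.corrcoef(C)                 # K x K temporal correlation
    FF1 = C_corr >= thr                     # strongly correlated pairs
    A_corr = (A.T @ A).toarray()
    np.fill_diagonal(A_corr, 0)
    FF2 = A_corr > 0                        # spatially overlapping pairs
    G = coo_matrix(np.logical_and(FF1, FF2))
    n_comp, labels = csgraph.connected_components(G)
    return [np.where(labels == i)[0] for i in range(n_comp)
            if np.sum(labels == i) > 1]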
Example #41
def update_spatial_components(Y, C, f, A_in, sn=None, dims=None, min_size=3, max_size=8, dist=3,
                              method='ellipse', expandCore=None, backend='single_thread', n_processes=4, n_pixels_per_process=128):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial spatial profile of the neuronal components.

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int


    sn: [optional] float
        noise associated with each pixel if known

    n_processes: [optional] int
        number of threads to use when the backend is multiprocessing, threading, or ipyparallel

    backend: [optional] str
        'ipyparallel', 'single_thread'
        single_thread: no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and sends jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion


    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    """
    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not type(Y) is str:
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    C = np.atleast_2d(C)
    if C.shape[1] == 1:
        raise Exception('Dimension of Matrix C must be neurons x time')

    f = np.atleast_2d(f)
    if f.shape[1] == 1:
        raise Exception('Dimension of Matrix f must be neurons x time ')

    if len(A_in.shape) == 1:
        A_in = np.atleast_2d(A_in).T

    if A_in.shape[0] == 1:
        raise Exception('Dimension of Matrix A must be pixels x neurons ')


    Cf = np.vstack((C, f))  # create matrix that include background components

    [d, T] = np.shape(Y)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    nr, _ = np.shape(C)       # number of neurons

    IND = determine_search_location(
        A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore)
    print " find search location"

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print(px)
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1, sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread, SLURM, multiprocessing or ipyparallel')

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update
    # baseline based on residual
    b = A_bas


    return A_, b, C
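For each pixel the code above solves a noise-constrained LARS problem over the components selected by IND. A minimal sketch of the same per-pixel idea, with scipy's nonnegative least squares standing in for lars_regression_noise (hypothetical helper, assumed shapes):

import numpy as np
from scipy.optimize import nnls

def toy_spatial_update(Y, Cf, IND):
    """Y: (pixels x T) movie, Cf: (K x T) temporal components incl. background,
    IND: (pixels x K) boolean search mask."""
    d = Y.shape[0]
    A = np.zeros((d, Cf.shape[0]))
    for px in range(d):
        idx = np.where(IND[px])[0]
        if idx.size == 0:
            continue
        # min ||Cf[idx].T a - Y[px]||_2  subject to  a >= 0
        a, _ = nnls(Cf[idx].T, Y[px])
        A[px, idx] = a
    return A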
Example #42
def update_spatial_components_parallel(Y,C,f,A_in,sn=None, d1=None,d2=None,min_size=3,max_size=8, dist=3, method = 'ellipse', expandCore = None,backend='single_thread',n_processes=4,n_pixels_per_process=128, memory_efficient=False):
    """update spatial footprints and background     
    through Basis Pursuit Denoising

    for each pixel i solve the problem 
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to 
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);
    
    for each pixel the search is limited to a few spatial components
    
    Parameters
    ----------   
    Y: np.ndarray (2D)
        movie, raw data in 2D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron. 
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial spatial profile of the neuronal components.
        
    d1: [optional] int
        x movie dimension
        
    d2: [optional] int
        y movie dimension

    min_size: [optional] int
                
    max_size: [optional] int
                
    dist: [optional] int
        
        
    sn: [optional] float
        noise associated with each pixel if known
        
    n_processes: [optional] int
        number of threads to use when the backend is multiprocessing,threading, or ipyparallel
        
    backend: [optional] str
        'multiprocessing', 'threading', 'ipyparallel', 'single_thread'
        single_thread: no parallelization. It should be used in most cases.
        multiprocessing or threading: use the corresponding python threading package. It has known issues on macOS and should not be used in most situations.
        ipyparallel: starts an ipython cluster and sends jobs to each of its nodes
        
    
    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread 
    
    memory_efficient [bool]
        whether or not to reduce memory usage (at the expense of increased computational time)
            
    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'
        
    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion


    Returns
    --------    
    A: np.ndarray        
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray        
         temporal components (updated only when spatial components are completely removed)             
       
    """

        
    
    if expandCore is None:
        expandCore=iterate_structure(generate_binary_structure(2,1), 2).astype(int)
    
    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    Y=np.atleast_2d(Y)
    if Y.shape[1]==1:
        raise Exception('Dimension of Matrix Y must be pixels x time')
    
    C=np.atleast_2d(C)
    if C.shape[1]==1:
        raise Exception('Dimension of Matrix C must be neurons x time')
    
    f=np.atleast_2d(f)
    if f.shape[1]==1:
         raise Exception('Dimension of Matrix f must be neurons x time ')
        
    if len(A_in.shape)==1:
        A_in=np.atleast_2d(A_in).T

    if A_in.shape[0]==1:
         raise Exception('Dimension of Matrix A must be pixels x neurons ')
    
    start_time = time.time()
    
    Cf = np.vstack((C,f)) # create matrix that includes background components
        
    [d,T] = np.shape(Y)
    
    if n_pixels_per_process > d:
        raise Exception('The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    nr,_ = np.shape(C)       # number of neurons
    
    IND = determine_search_location(A_in,d1,d2,method = method, min_size = min_size, max_size = max_size, dist = dist, expandCore = expandCore)
    print " find search location"
    
    ind2_ =[ np.hstack( (np.where(iid_)[0] , nr+np.arange(f.shape[0])) )   if  np.size(np.where(iid_)[0])>0  else [] for iid_ in IND]


    folder = tempfile.mkdtemp()
    
    if backend == 'multiprocessing' or backend == 'threading':

        A_name = os.path.join(folder, 'A_temp')  
                      
        # Pre-allocate a writeable shared memory map as a container for the
        # results of the parallel computation     
        print "Create Matrix for dumping data from matrix A and C for parallel computation...."              
        A_ = np.memmap(A_name, dtype=A_in.dtype,shape=(d,nr+np.size(f,0)), mode='w+') 

        pixels_name = os.path.join(folder, 'pixels')

        C_name = os.path.join(folder, 'C_temp')          
        
        # Dump the input data to disk to free the memory
        dump(Y, pixels_name)
        dump(Cf, C_name)        
        
        # use memory mapped versions of C and Y
        Y = load(pixels_name, mmap_mode='r')
        Cf = load(C_name, mmap_mode='r')
        
        pixel_groups=[range(i,i+n_pixels_per_process) for i in range(0,Y.shape[0]-n_pixels_per_process+1,n_pixels_per_process)]
        
        # Fork the worker processes to perform computation concurrently    
        print "start parallel pool..."
        sys.stdout.flush()
        Parallel(n_jobs=n_processes, backend=backend,verbose=100,max_nbytes=None)(delayed(lars_regression_noise_parallel)(Y,Cf,A_,sn,i,ind2_)
                            for i in pixel_groups) 
                        
            
        # if Y.shape[0] is not a multiple of n_pixels_per_process run on the remaining pixels
        pixels_remaining= Y.shape[0] %  n_pixels_per_process
        if pixels_remaining>0:
            print("Running deconvolution for remaining pixels: " + str(pixels_remaining))
            lars_regression_noise_parallel(Y,Cf,A_,sn,range(Y.shape[0]-pixels_remaining,Y.shape[0]),ind2_,positive=1)        
        A_=np.array(A_)
       
    elif backend == 'ipyparallel': # use the ipyparallel package, you need to start a cluster server (ipcluster command) in order to use it
              
        C_name = os.path.join(folder, 'C_temp.npy')        
        np.save(C_name,Cf)

        if type(Y) is np.core.memmap: # if input file is already memory mapped then find the filename 
            Y_name=Y.filename
        else:                        # if not create a memory mapped version (necessary for parallelization) 
            Y_name = os.path.join(folder, 'Y_temp.npy') 
            np.save(Y_name,Y)   
            Y=np.load(Y_name,mmap_mode='r') 
            
        # create arguments to be passed to the function. Here we are grouping bunch of pixels to be processed by each thread    
        pixel_groups=[(Y_name,C_name,sn,ind2_,range(i,i+n_pixels_per_process)) for i in range(0,d1*d2-n_pixels_per_process+1,n_pixels_per_process)]

        A_ = np.zeros((d,nr+np.size(f,0)))
        try: # if server is not running and raise exception if not installed or not started        
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the installation of the ipyparallel (pip install ipyparallel) package and  starting a cluster (type ipcluster start -n 6) where 6 is the number of nodes"
            raise
        
        if len(c) <  n_processes:
            print(len(c))
            raise Exception("the number of nodes in the cluster is less than the required processes: decrease the n_processes parameter to a suitable value")
            
        dview=c[:n_processes] # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups) 
        for chunk in parallel_result:
            for pars in chunk:
                px,idxs_,a=pars
                A_[px,idxs_]=a
        #clean up        
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
        
        
             
    elif backend=='single_thread':      

        Cf_=[Cf[idx_,:] for idx_ in ind2_]

        #% LARS regression 
        A_ = np.hstack((np.zeros((d,nr)),np.zeros((d,np.size(f,0)))))
        
        
        for c,y,s,id2_,px in zip(Cf_,Y,sn,ind2_,range(d)):
            if px%1000==0:
                print(px)
            if np.size(c)>0:                
                _, _, a, _ , _= lars_regression_noise(y, np.array(c.T), 1, sn[px]**2*T)
                if np.isscalar(a):
                    A_[px,id2_]=a
                else:
                    A_[px,id2_]=a.T
        
    else:
        raise Exception('Unknown backend specified: use single_thread, threading, multiprocessing or ipyparallel')
        
    #%
    print('Updated Spatial Components')
    A_=threshold_components(A_, d1, d2)
    print "threshold"
    ff = np.where(np.sum(A_,axis=0)==0);           # remove empty components
    if np.size(ff)>0:
        ff = ff[0]
        warn('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_,list(ff),1)
        C = np.delete(C,list(ff),0)
    
    A_ = A_[:,:nr]                
    A_=coo_matrix(A_)

    if memory_efficient:
        print "Using memory efficient computation (slow but memory preserving)"
        A__=coo_matrix(A_,dtype=np.float32)
        C__=coo_matrix(C[:nr,:],dtype=np.float32)
        Y_res_name = os.path.join(folder, 'Y_res_temp.npy')
        Y_res = np.memmap(Y_res_name, dtype=np.float32, mode='w+', shape=Y.shape)
        Y_res = np.memmap(Y_res_name, dtype=np.float32, mode='r+', shape=Y.shape)
        print "computing residuals"        
        Y_res[:] = -A__.dot(C__).todense()[:]
        Y_res[:]+=Y
    else:   
        print "Using memory trade-off computation (good use of memory if input is memmaped)"         
        Y_res = Y - A_.dot(coo_matrix(C[:nr,:]))


    print "Computing A_bas"         
    A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update baseline based on residual
    Y_res[:]=1
    b = A_bas
    
    print("--- %s seconds ---" % (time.time() - start_time))
    
    try: #clean up
        # remove temporary file created
        print "Remove temporary file created"
        shutil.rmtree(folder)

    except:
        
        raise Exception("Failed to delete: " + folder)
        
    return A_,b,C
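The multiprocessing branch above follows joblib's shared-memmap pattern: dump the inputs to disk, reload them memory-mapped, and let each worker write its block of pixels into a writeable memmap. A minimal sketch of that pattern with a hypothetical worker (not the library's lars_regression_noise_parallel):

import os, shutil, tempfile
import numpy as np
from joblib import Parallel, delayed, dump, load

def toy_worker(Y, out, rows):
    out[rows] = Y[rows] * 2.0          # each worker fills its own slice

folder = tempfile.mkdtemp()
Y = np.random.rand(1000, 50)
dump(Y, os.path.join(folder, 'Y'))
Y = load(os.path.join(folder, 'Y'), mmap_mode='r')       # read-only input
out = np.memmap(os.path.join(folder, 'out'), dtype=Y.dtype,
                shape=Y.shape, mode='w+')                # shared output
groups = [range(i, i + 100) for i in range(0, 1000, 100)]
Parallel(n_jobs=4, max_nbytes=None)(delayed(toy_worker)(Y, out, list(g))
                                    for g in groups)
shutil.rmtree(folder)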
Example #43
def mergeROIS(Y_res,A,b,C,f,d1,d2,dz,nr,P_,thr=0.8,mx=50,sn=None,deconv_method='spgl1',min_size=3,max_size=8,dist=3,method_exp = 'ellipse', expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int)):
    """
    merging of spatially overlapping components that have highly correlated temporal activity
    % The correlation threshold for merging overlapping components is user specified in P.merge_thr (default value 0.85)
    % Inputs:
    % Y_res:        residual movie after subtracting all found components
    % A:            matrix of spatial components
    % b:            spatial background
    % C:            matrix of temporal components
    % f:            temporal background
    % P:            parameter struct
    
    % Outputs:
    % A:            matrix of new spatial components
    % C:            matrix of new temporal components
    % nr:           new number of components
    % merged_ROIs:  list of old components that were merged
    
    % Written by:
    % Andrea Giovannucci from implementation of Eftychios A. Pnevmatikakis, Simons Foundation, 2015
    """
    
#%
    
    N = len(nr)
    [d,T] = np.shape(Y_res)
    C_corr = np.corrcoef(C[:N,:],C[:N,:])[:N,:N];
    FF1=C_corr>=thr; #find graph of strongly correlated temporal components 
    A_corr=A.T*A
    A_corr.setdiag(0)
    FF2=A_corr>0            # % find graph of overlapping spatial components
    FF3=np.logical_and(FF1,FF2.todense())
    FF3=coo_matrix(FF3)
    c,l=csgraph.connected_components(FF3) # % extract connected components
    
    p=len(P_[0]['gn'])
    MC=[];
    for i in range(c):     
        if np.sum(l==i)>1:
            MC.append((l==i).T)
    MC=np.asarray(MC).T
    
    if MC.ndim>1:
        cor = np.zeros((np.shape(MC)[1],1));
        
            
        for i in range(np.size(cor)):
            fm = np.where(MC[:,i])[0]
            for j1 in range(np.size(fm)):        
                for j2 in range(j1+1,np.size(fm)):
                    print(j1, j2)
                    cor[i] = cor[i] +C_corr[fm[j1],fm[j2]]
        
        
        Y_res = Y_res + np.dot(b,f);
        if np.size(cor) > 1:
            ind=np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]
    
        nm = min((np.size(ind),mx))   # number of merging operations
    
        A_merged = coo_matrix((d,nm)).tocsr();
        C_merged = np.zeros((nm,T));
        nr_merged = [0]*nm
        

        P_merged=[];
        merged_ROIs = []
    #%
        for i in range(nm):
            P_cycle=dict()
            merged_ROI=np.where(MC[:,ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(np.array(C[merged_ROI,:])**2,axis=1)) #SVR need to cast to array otherwise assumes matrix power
    #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))

            A = A.tocsr() #SVR A comes as coo_matrix which has no __get_item__
            A_merged[:,i] = csr_matrix((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
    
            Y_res = Y_res + A[:,merged_ROI]*C[merged_ROI,:]
            
            nr_merged[i] = nr[merged_ROI[0]]

            aa_1=scipy.sparse.linalg.spsolve(spdiags(nC,0,len(nC),len(nC)),C[merged_ROI,:])
            aa_2=(aa_1).mean(axis=0)        
            
            ff = np.nonzero(A_merged[:,i])[0]     
            
            cc,_,_,Ptemp = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,deconv_method=deconv_method)  
            
            aa,bb,cc,_ = update_spatial_components(np.asarray(Y_res),cc,f,A_merged[:,i],d1=d1,d2=d2,dz=dz,nr=[nr_merged[i]],sn=sn,min_size=min_size,max_size=max_size,dist=dist,method = method_exp, expandCore =expandCore)
    
            A_merged[:,i] = aa.tocsr();        
    
            cc,_,_,Ptemp = update_temporal_components(Y_res[ff,:],A_merged[ff,i],bb[ff],cc,f,p=p,deconv_method=deconv_method)
            
            P_cycle=P_[merged_ROI[0]].copy()
            P_cycle['gn']=Ptemp[0]['gn']
            P_cycle['b']=Ptemp[0]['b']
            P_cycle['c1']=Ptemp[0]['c1']
            P_cycle['neuron_sn']=Ptemp[0]['neuron_sn']
            P_merged.append(P_cycle)
            C_merged[i,:] = cc
            if i+1 < nm:
                Y_res[ff,:] = Y_res[ff,:] - A_merged[ff,i]*cc
                
        #%
        neur_id = np.unique(np.hstack(merged_ROIs))
        
    
    
        good_neurons=np.setdiff1d(range(N),neur_id)    
        
        A= scipy.sparse.hstack((A[:,good_neurons],A_merged.tocsc()))
        C = np.vstack((C[good_neurons,:],C_merged))
        nr = [nrv for nri,nrv in enumerate(nr) if nri in good_neurons] + nr_merged

    #    P_new=list(P_[good_neurons].copy())
        P_new=[P_[pp] for pp in good_neurons]
        
        for p in P_merged:
            P_new.append(p)
    
        #SVR TODO: update nr appropriately after merge

        #nr = nr - len(neur_id) + nm
    
    else:
        warnings.warn('No neurons merged!')
        merged_ROIs=[];
        P_new=P_
        
    return A,C,nr_merged,merged_ROIs,P_new
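Both mergeROIS variants keep the residual consistent while refitting: a merge group's contribution is first added back to Y_res, the merged component is re-estimated against it, and the refit component is subtracted again. A toy sketch of that bookkeeping (crude merged footprint and trace, assumed shapes):

import numpy as np

d, T = 100, 50
Y_res = np.random.rand(d, T)               # residual movie
A_grp = np.random.rand(d, 3)               # footprints being merged
C_grp = np.random.rand(3, T)               # their temporal traces
Y_res = Y_res + A_grp @ C_grp              # put the old components back
a_new = A_grp.sum(axis=1, keepdims=True)   # crude merged footprint
c_new = C_grp.mean(axis=0, keepdims=True)  # crude merged trace
Y_res = Y_res - a_new @ c_new              # remove the refit component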
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    logger.info("Selected viscous type is {}".format(args.type))

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info("Loading image {} using NiBabel...".format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug(
            "Intensity range of gradient image is ({}, {})".format(image_gradient_data.min(), image_gradient_data.max())
        )

        # build output file name and check for its existence, if not in sections mode
        if "sections" != args.type:
            # build output file name
            image_viscous_name = (
                args.folder
                + "/"
                + image.split("/")[-1][:-4]
                + "_viscous_{}_sec_{}_ds_{}".format(args.type, args.sections, args.dsize)
            )
            image_viscous_name += image.split("/")[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning("The output file {} already exists. Skipping this image.".format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if "plain" == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)

            # apply closing
            logger.info("Applying the morphology over whole image at once...")
            image_viscous_data = grey_closing(image_gradient_data, footprint=disc)

            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug("{} bins created".format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception(
                "Inconsistency between the number of requested and created bins ({} to {})".format(
                    args.sections, len(bins) - 1
                )
            )

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient image's topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice])
        logger.info("Applying the viscous morphological operations {} times...".format(args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if "sections" == args.type:
                # build output file name
                image_viscous_name = (
                    args.folder
                    + "/"
                    + image.split("/")[-1][:-4]
                    + "_viscous_{}_sec_{}_ds_{}_sl_{}".format(args.type, args.sections, args.dsize, slice)
                )
                image_viscous_name += image.split("/")[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning(
                            "The output file {} already exists. Skipping this slice.".format(image_viscous_name)
                        )
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = image_gradient_data >= bins[slice]  # all voxels which are over the current slice
            mask_lower = image_gradient_data < bins[slice - 1]  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice
            if "mercury" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif "oil" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels at or below the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal_or_lower].max()
            elif "sections" == args.type:
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max()

            logger.debug(
                "{} of {} voxels belong to this level.".format(
                    len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)
                )
            )

            # apply the closing with the appropriate disc size
            logger.debug(
                "Applying a disk of {} to all values >= {} and < {}...".format(dsize, bins[slice - 1], bins[slice])
            )
            image_closed_data = grey_closing(image_threshold_data, footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)

            # save created output file, if in sections mode
            if "sections" == args.type:
                # save resulting gradient image
                logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if "sections" != args.type:
            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info("Successfully terminated.")
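The "plain" branch above reduces to a single greyscale closing with a quasi-spherical footprint; a minimal sketch on a toy volume (dsize is an assumed parameter):

import numpy as np
from scipy.ndimage import generate_binary_structure, iterate_structure, grey_closing

dsize = 3
vol = np.random.rand(32, 32, 32)
disc = iterate_structure(generate_binary_structure(3, 1), dsize)
closed = grey_closing(vol, footprint=disc)   # fills dark valleys up to the disc size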
Example #45
def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, dist=3,
                              expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int), dview=None):
    """
    restrict search location to subset of pixels

    TODO
    """
    from scipy.ndimage.morphology import grey_dilation
    from scipy.sparse import coo_matrix, issparse

    if len(dims) == 2:
        d1, d2 = dims
    elif len(dims) == 3:
        d1, d2, d3 = dims

    d, nr = np.shape(A)

    A = csc_matrix(A)

    IND = False * np.ones((d, nr))
    if method == 'ellipse':
        Coor = dict()
        if len(dims) == 2:
            Coor['x'] = np.kron(np.ones(d2), list(range(d1)))
            Coor['y'] = np.kron(list(range(d2)), np.ones(d1))
        elif len(dims) == 3:
            Coor['x'] = np.kron(np.ones(d3 * d2), list(range(d1)))
            Coor['y'] = np.kron(np.kron(np.ones(d3), list(range(d2))), np.ones(d1))
            Coor['z'] = np.kron(list(range(d3)), np.ones(d2 * d1))
        if not dist == np.inf:             # determine search area for each neuron
            cm = np.zeros((nr, len(dims)))        # vector for center of mass
            Vr = []    # cell(nr,1);
            IND = []       # indicator for distance

            for i, c in enumerate(['x', 'y', 'z'][:len(dims)]):
                cm[:, i] = old_div(np.dot(Coor[c], A[:, :nr].todense()), A[:, :nr].sum(axis=0))

#            for i in range(nr):            # calculation of variance for each component and construction of ellipses
#                dist_cm = coo_matrix(np.hstack([Coor[c].reshape(-1, 1) - cm[i, k]
#                                                for k, c in enumerate(['x', 'y', 'z'][:len(dims)])]))
#                Vr.append(dist_cm.T * spdiags(A[:, i].toarray().squeeze(),
#                                              0, d, d) * dist_cm / A[:, i].sum(axis=0))
#
#                if np.sum(np.isnan(Vr)) > 0:
#                    raise Exception('You cannot pass empty (all zeros) components!')
#
#                D, V = eig(Vr[-1])
#
#                dkk = [np.min((max_size**2, np.max((min_size**2, dd.real)))) for dd in D]
#
#                # search indexes for each component
#                IND.append(np.sqrt(np.sum([(dist_cm * V[:, k])**2 / dkk[k]
#                                           for k in range(len(dkk))], 0)) <= dist)
#            IND = (np.asarray(IND)).squeeze().T
            pars = []
            for i in range(nr):
                pars.append([Coor, cm[i], A[:, i], Vr, dims, dist, max_size, min_size, d])

            if dview is None:
                res = list(map(construct_ellipse_parallel, pars))
            else:
                res = dview.map_sync(construct_ellipse_parallel, pars)

            for r in res:
                IND.append(r)

            IND = (np.asarray(IND)).squeeze().T

        else:
            IND = True * np.ones((d, nr))
    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:, i].toarray(), dims[::-1])  # , order='F')
            # A_temp = np.reshape(A[:, i].toarray(), (d2, d1))
            if len(expandCore) > 0:
                if len(expandCore.shape) < len(dims):  # default for 3D
                    expandCore = iterate_structure(
                        generate_binary_structure(len(dims), 1), 2).astype(int)
                A_temp = grey_dilation(A_temp, footprint=expandCore)
            else:
                A_temp = grey_dilation(A_temp, [1] * len(dims))

            IND[:, i] = np.squeeze(np.reshape(A_temp, (d, 1))) > 0
    else:
        IND = True * np.ones((d, nr))

    return IND
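A minimal sketch of the 'dilate' branch in isolation (hypothetical helper, dense footprints assumed): each footprint is grey-dilated with the expansion kernel and its nonzero pixels become the allowed search region.

import numpy as np
from scipy.ndimage import generate_binary_structure, iterate_structure, grey_dilation

def dilate_search_mask(A, dims):
    """A: (pixels x K) dense footprints, dims: (d1, d2)."""
    core = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)
    d, K = A.shape
    IND = np.zeros((d, K), dtype=bool)
    for i in range(K):
        img = A[:, i].reshape(dims)
        IND[:, i] = grey_dilation(img, footprint=core).ravel() > 0
    return IND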
Example #46
File: morpho.py  Project: ricounet67/gala
def diamond_se(radius, dimension):
    se = generate_binary_structure(dimension, 1)
    return iterate_structure(se, radius)
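For example, diamond_se(2, 2) iterates the 4-connected 2D structure twice and yields a 5x5 diamond:

from scipy.ndimage import generate_binary_structure, iterate_structure

se = iterate_structure(generate_binary_structure(2, 1), 2)
# array([[False, False,  True, False, False],
#        [False,  True,  True,  True, False],
#        [ True,  True,  True,  True,  True],
#        [False,  True,  True,  True, False],
#        [False, False,  True, False, False]])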
def update_spatial_components(Y,C,f,A_in,d1=None,d2=None,dz=None, nr=None, min_size=3,max_size=8,dist=3,sn=None,use_parallel=False, method = 'ellipse', expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int)):
    #% set variables
    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    Y=np.atleast_2d(Y)
    if Y.shape[1]==1:
        raise Exception('Dimension of Matrix Y must be pixels x time')
    
    C=np.atleast_2d(C)
    if C.shape[1]==1:
        raise Exception('Dimension of Matrix C must be neurons x time')
    
    f=np.atleast_2d(f)
    if f.shape[1]==1:
         raise Exception('Dimension of Matrix f must be neurons x time ')
        
    if len(A_in.shape)==1:
        A_in=np.atleast_2d(A_in).T

    if A_in.shape[0]==1:
         raise Exception('Dimension of Matrix A must be pixels x neurons ')
    
    Cf = np.vstack((C,f))
    
    start_time = time.time()
    [d,T] = np.shape(Y)

    if nr is None:
        nr = [0]*np.shape(C)[0]       # assign every neuron to channel 0 if no per-neuron channel list is given

    N = len(nr) #Total number of neurons
    nch = len(np.unique(nr)) #Total number of channels
    shape = (dz, d1, d2, 1+np.max(nr))

    if type(method) == str:
        method = [method]*nch
    elif len(method) < nch:
        raise Exception('Number of methods must be same as number of channels')

    DIST = 3
    dist = DIST #SVR Hack to tighten up search neighborhoods

    IND = []
    
    #coms = precalculate_coms(A_in, d1,d2, dz, nr)

    for ni,zi in it.product(range(nch), range(dz)):

        INDtmp = np.zeros((d1*d2, N)).astype(bool)

        comp_search_out = determine_search_location(A_in[plane_idx(shape, zi, ni),comp_idx(nr,ni)],
            d1,d2, method = method[ni], min_size = min_size, max_size = max_size,
            dist = dist, expandCore = expandCore)
    
        if len(comp_search_out.shape)==1:
            comp_search_out = comp_search_out.reshape((comp_search_out.shape[0],1))
        INDtmp[:,comp_idx(nr,ni)] = comp_search_out

        IND.extend(list(INDtmp)) # indices per pixel

    # Copy search indices across channels to allow for demixing
    IND = np.array(IND)
    for zi in range(dz):
        for ni in range(nch):
            for nj in range(nch):

                if nj != ni:
                # copy channel search indices to same components but on other channels
                    IND[plane_idx(shape, zi, nj), comp_idx(nr,ni)] = IND[plane_idx(shape, zi, ni), comp_idx(nr, ni)]
    
    IND = list(IND)

    Y_ = list(Y) # list per pixel

    ind2_ =[ np.hstack( (np.where(iid_)[0] , N+np.arange(f.shape[0])) )   if  np.size(np.where(iid_)[0])>0  else [] for iid_ in IND]

    Cf_=[Cf[idx_,:] for idx_ in ind2_]

    #% LARS regression 
    A_ = np.hstack((np.zeros((d,N)),np.zeros((d,np.size(f,0)))))
    
    if use_parallel:
        raise Exception('NOT IMPLEMENTED YET')
        #import multiprocessing as mp    
        #pool = mp.Pool(processes=8)
        #results = [pool.apply(basis_pursuit_denoising, args=(x.T,y.T,z)) for x,y,z in zip(Y_,Cf_,sn)]
        #print(results)
    else:
        for c,y,s,id2_,px in zip(Cf_,Y_,sn,ind2_,range(d)):
            if px%1000==0:
                print(px)
            if np.size(c)>0:                
                _, _, a, _ , _= lars_regression_noise(y, np.array(c.T), 1, sn[px]**2*T)
                if np.isscalar(a):
                    A_[px,id2_]=a
                else:
                    A_[px,id2_]=a.T
              
                
    
    #%
    print('Updated Spatial Components')
    
    #Hack to mask background
    ff = np.where(np.sum(A_,axis=0)==0);           # remove empty components
    ff = ff[0]
    ff = [fi for fi in ff if fi < N] #SVR Make sure not to include background components for elimination
    if np.size(ff)>0:
        warn('eliminating empty components!!')
        nr = [n for ni, n in enumerate(nr) if ni not in ff]
        A_ = np.delete(A_,ff,1)
        C = np.delete(C,ff,0)
#        raise Exception('Eliminated empty component. Reduce number of neurons')
        
    N = len(nr)

    Y_res = Y - np.dot(A_[:,:N],C[:N,:])
    A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update baseline based on residual
    b = A_bas
    A_ = A_[:,:N]    
            
    A_=coo_matrix(A_)
    print("--- %s seconds ---" % (time.time() - start_time))
    return A_,b,C, nr
Example #48
def get_2D_peaks(arr2D, visualize=False, amp_min=DEFAULT_AMP_MIN):
    """
    extract peaks from spectrogram
    ###########################################################################
    #http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
    ###########################################################################
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    """

    # define a 4-connected neighborhood
    #   #1#
    #   111
    #   #1#
    struct = generate_binary_structure(2, 1)

    # define an 8-connected neighborhood
    #struct = generate_binary_structure(2, 2)

    # iterate this structure to get larger mask with param
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maximum using our filter shape
    #apply the local maximum filter; all pixel of maximal value
    #in their neighborhood are set to 1
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

    #local_max is a mask that contains the peaks we are
    #looking for, but also the background.
    #In order to isolate the peaks we must remove the background from the mask.
    background = (arr2D == 0)

    #a little technicality: we must erode the background in order to
    #successfully subtract it form local_max, otherwise a line will
    #appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    #we obtain the final mask, containing only peaks,
    #by removing the background from the local_max mask
    detected_peaks = local_max & ~eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    print('arr2D', len(arr2D))
    print('amps', len(amps))

    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if visualize:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()

        # save or show
        plt.show()
        #plt.savefig('fingerprints.png')

    return list(zip(frequency_idx, time_idx))
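A minimal usage sketch (toy signal; amp_min is chosen arbitrarily here, and PEAK_NEIGHBORHOOD_SIZE is assumed to be defined at module level as in the original):

import numpy as np
from matplotlib import mlab

fs = 44100
samples = np.random.randn(fs * 5)            # 5 s of noise as a stand-in
arr2D, freqs, times = mlab.specgram(samples, NFFT=4096, Fs=fs, noverlap=2048)
arr2D = 10 * np.log10(arr2D + 1e-12)         # dB scale, guard against zeros
peaks = get_2D_peaks(arr2D, visualize=False, amp_min=10)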
Example #49
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3,normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=np.int), ss=np.ones((3, 3), dtype=np.int), nb=1, method_ls='nnls_L0'):

    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial spatial profile of the components. If A_in is boolean then it defines the spatial support of A. 
        Otherwise it is used to determine the search locations through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int


    sn: [optional] float
        noise associated with each pixel if known

    backend [optional] str
        'ipyparallel', 'single_thread'
        single_thread:no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and then send jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0'. Nonnegative least square with L0 penalty        
             'lasso_lars' lasso lars function from scikit learn
             'lasso_lars_old' lasso lars from old implementation, will be deprecated 

        normalize_yyt_one: bool
            whether to normalize the C and A matrices so that diag(C*C.T) are ones

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)


    """
    if normalize_yyt_one and C is not None:
        C = np.array(C)
#        cct=np.diag(C.dot(C.T))
        nr_C=np.shape(C)[0]
        d_ = scipy.sparse.lil_matrix((nr_C,nr_C))
        d_.setdiag(np.sqrt(np.sum(C**2,1)))
        A_in=A_in*d_
        C=old_div(C,np.sqrt(np.sum(C**2,1)[:,np.newaxis]))


    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, basestring):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A or C need to be determined')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[1]), dtype=bool)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    if f is not None:
        nb = f.shape[0]
    # otherwise keep the `nb` argument as given (the original referenced an
    # undefined name `b` here, which would raise a NameError)

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = model.components_.squeeze()
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)
            f = np.atleast_2d(f)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimate of C and f')

    print((np.shape(A_in)))

    Cf = np.vstack((C, f))  # create matrix that include background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print(('cluster temporary folder:' + tmpf))
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()


    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if type(Y) is np.core.memmap:  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, basestring) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')

    # create arguments to be passed to the function. Here we are grouping
    # bunch of pixels to be processed by each thread
#    pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
# for i in range(0, np.prod(dims) - n_pixels_per_process + 1,
# n_pixels_per_process)]
    cct=np.diag(C.dot(C.T))
    rank_f=nb
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i, i + n_pixels_per_process)), method_ls, cct,rank_f])

    if i < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i, np.prod(dims))), method_ls, cct,rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
##
#        Cf_ = [Cf[idx_, :] for idx_ in ind2_]
#
#        #% LARS regression
#        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))
#        
#        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
#            if px % 1000 == 0:
#                print px
#            if np.size(c) > 0:
#                _, _, a, _, _ = lars_regression_noise_old(y, np.array(c.T), 1, sn[px]**2 * T)
#                if np.isscalar(a):
#                    A_[px, id2_] = a
#                else:
#                    A_[px, id2_] = a.T
##

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=(3, 3), thr_method=thr_method, maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc,
                              se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

#    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y,f.T,block_size=5000,dview=dview) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove temporary file created
        print("Remove temporary file created")
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)

    if A_in.dtype == bool:

        return A_, b, C, f
    else:
        return A_, b, C
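When A_in is boolean and C is not given, the background above is initialized by an NMF restricted to the pixels outside every component's support; a toy sketch of that step (assumed shapes, rank nb=1):

import numpy as np
from sklearn.decomposition import NMF

Y = np.random.rand(500, 100)                 # pixels x time
support = np.random.rand(500) > 0.7          # pixels covered by some component
model = NMF(n_components=1, init='random', random_state=0)
b_bg = model.fit_transform(np.maximum(Y[~support, :], 0))   # spatial background
f_bg = model.components_                     # temporal background profile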
Example #50
def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, dist=3,
                              expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int), dview=None):
    """
    compute, for each spatial component, the set of pixels around its center of
    mass in which to search for the component's footprint

    does this either by following an ellipse around the center of mass or by a
    step-by-step dilation of the footprint

    Parameters:
    ----------
    A: np.ndarray
        matrix of spatial components (pixels x neurons)

    dims: [optional] tuple
        x, y[, z] movie dimensions

    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional] scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int

    Returns:
    --------
    dist_indicator: np.ndarray
        distance from the cm to search for the spatial footprint

    Raise:
    -------
    Exception('You cannot pass empty (all zeros) components!')
    """

    from scipy.ndimage.morphology import grey_dilation

    # we initialize the values
    if len(dims) == 2:
        d1, d2 = dims
    elif len(dims) == 3:
        d1, d2, d3 = dims
    d, nr = np.shape(A)
    A = csc_matrix(A)
    dist_indicator = scipy.sparse.csc_matrix((d, nr),dtype= np.float32)

    if method == 'ellipse':
        Coor = dict()
        # we create a matrix of size A.x of each pixel coordinate in A.y and inverse
        if len(dims) == 2:
            Coor['x'] = np.kron(np.ones(d2), list(range(d1)))
            Coor['y'] = np.kron(list(range(d2)), np.ones(d1))
        elif len(dims) == 3:
            Coor['x'] = np.kron(np.ones(d3 * d2), list(range(d1)))
            Coor['y'] = np.kron(
                np.kron(np.ones(d3), list(range(d2))), np.ones(d1))
            Coor['z'] = np.kron(list(range(d3)), np.ones(d2 * d1))
        if not dist == np.inf:  # determine search area for each neuron
            cm = np.zeros((nr, len(dims)))  # vector for center of mass
            Vr = []  # cell(nr,1);
            dist_indicator = []
            pars = []
            # for each dim
            for i, c in enumerate(['x', 'y', 'z'][:len(dims)]):
                # mass center in this dim = (coor*A)/sum(A)
                cm[:, i] = old_div(
                    np.dot(Coor[c], A[:, :nr].todense()), A[:, :nr].sum(axis=0))

            # parrallelizing process of the construct ellipse function
            for i in range(nr):
                pars.append([Coor, cm[i], A[:, i], Vr, dims,
                             dist, max_size, min_size, d])
            if dview is None:
                res = list(map(construct_ellipse_parallel, pars))
            else:
                if 'multiprocessing' in str(type(dview)):
                    res = dview.map_async(
                        construct_ellipse_parallel, pars).get(4294967)
                else:
                    res = dview.map_sync(construct_ellipse_parallel, pars)
            for r in res:
                dist_indicator.append(r)

            dist_indicator = (np.asarray(dist_indicator)).squeeze().T

        else:
            raise Exception('Not implemented')

    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:, i].toarray(), dims[::-1])
            if len(expandCore) > 0:
                if len(expandCore.shape) < len(dims):  # default for 3D
                    expandCore = iterate_structure(
                        generate_binary_structure(len(dims), 1), 2).astype(int)
                A_temp = grey_dilation(A_temp, footprint=expandCore)
            else:
                A_temp = grey_dilation(A_temp, [1] * len(dims))

            dist_indicator[:, i] = scipy.sparse.coo_matrix(np.squeeze(np.reshape(A_temp, (d, 1)))[:,None] > 0)
    else:
        raise Exception('Not implemented')

    return dist_indicator
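A simplified sketch of the 'ellipse' branch: compute each component's center of mass from the coordinate vectors and keep the pixels within dist of it (an isotropic disc here, rather than the variance-shaped ellipse built by construct_ellipse_parallel):

import numpy as np

def ellipse_search_mask(A, dims, dist=3):
    """A: (pixels x K) dense footprints (nonempty columns), dims: (d1, d2)."""
    d1, d2 = dims
    x = np.kron(np.ones(d2), np.arange(d1))   # per-pixel x coordinate
    y = np.kron(np.arange(d2), np.ones(d1))   # per-pixel y coordinate
    masks = []
    for i in range(A.shape[1]):
        w = A[:, i]
        cx, cy = x @ w / w.sum(), y @ w / w.sum()
        masks.append((x - cx) ** 2 + (y - cy) ** 2 <= dist ** 2)
    return np.asarray(masks).T                # pixels x K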
Example #51
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True, b_in=None,
                              se=np.ones((3, 3), dtype=np.int), ss=np.ones((3, 3), dtype=np.int), nb=1,
                              method_ls='lasso_lars', update_background_components=True, low_rank_background=True, block_size=1000, num_blocks_per_run=20):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters:
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).

    C: np.ndarray
        calcium activity of each neuron.

    f: np.ndarray
        temporal profile  of background activity.

    A_in: np.ndarray
        initial spatial profile of the components. If A_in is boolean then it defines the spatial support of A.
        Otherwise it is used to determine the search locations through determine_search_location

    b_in: np.ndarray
        you can pass the background as an input, especially in the case of one background per patch, since it will be updated using HALS

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int

    sn: [optional] float
        noise associated with each pixel if known

    backend [optional] str
        'ipyparallel', 'single_thread'
        single_thread:no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and then send jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread

    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0': nonnegative least squares with L0 penalty
             'lasso_lars': lasso lars function from scikit-learn
             'lasso_lars_old': lasso lars from the old implementation, will be deprecated

    normalize_yyt_one: bool
        whether to normalize the C and A matrices so that diag(C*C.T) are ones

    update_background_components:bool
        whether to update the background components in the spatial phase

    low_rank_background:bool
        whether to update the background using a low rank approximation. If False, all the nonzero elements of the background components are updated using HALS
        (to be used with one background per patch)


    Returns:
    --------
    A: np.ndarray
         new estimate of spatial footprints

    b: np.ndarray
        new estimate of spatial background

    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    f: np.ndarray
        same as the input f unless empty components were deleted.

    Raise:
    -------
    Exception('You need to define the input dimensions')

    Exception('Dimension of Matrix Y must be pixels x time')

    Exception('Dimension of Matrix C must be neurons x time')

    Exception('Dimension of Matrix f must be background comps x time ')

    Exception('Either A or C need to be determined')

    Exception('Dimension of Matrix A must be pixels x neurons ')

    Exception('You need to provide estimate of C and f')

    Exception('Not implemented consistently')

    Exception("Failed to delete: " + folder)
    """
    print('Initializing update of Spatial Components')

    if expandCore is None:
        expandCore = iterate_structure(
            generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    # shape transformation and tests
    Y, A_in, C, f, n_pixels_per_process, rank_f, d, T = test(
        Y, A_in, C, f, n_pixels_per_process, nb)

    start_time = time.time()
    print('computing the distance indicators')
    # compute the search-location indicator from the distance indicator
    ind2_, nr, C, f, b_, A_in = computing_indicator(
        Y, A_in, b_in, C, f, nb, method, dims, min_size, max_size, dist, expandCore, dview)
    if normalize_yyt_one and C is not None:
        C = np.array(C)
        nr_C = np.shape(C)[0]
        d_ = scipy.sparse.lil_matrix((nr_C, nr_C))
        d_.setdiag(np.sqrt(np.sum(C ** 2, 1)))
        A_in = A_in * d_
        C = old_div(C, np.sqrt(np.sum(C ** 2, 1)[:, np.newaxis]))

    if b_in is None:
        b_in = b_

    print('memmaping')
    # create a memory-mapped file if not already present, and send Cf, a
    # matrix that includes the background components
    C_name, Y_name, folder = creatememmap(Y, np.vstack((C, f)), dview)

    # we create a pixel group array (chunks for the CNMF) for the parallelization of the process
    print('Updating Spatial Components using lasso lars')
    cct = np.diag(C.dot(C.T))
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, i + n_pixels_per_process)), method_ls, cct, ])
    if i + n_pixels_per_process < np.prod(dims):  # leftover pixels that do not fill a full group
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct])
    A_ = np.zeros((d, nr + np.size(f, 0)))  # init A_
    if dview is not None:
        if 'multiprocessing' in str(type(dview)):
            parallel_result = dview.map_async(
                regression_ipyparallel, pixel_groups).get(4294967)
        else:
            parallel_result = dview.map_sync(
                regression_ipyparallel, pixel_groups)
            dview.results.clear()
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))

    for chunk in parallel_result:
        for pars in chunk:
            px, idxs_, a = pars
            A_[px, idxs_] = a

    print("thresholding components")
    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method,
                              maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss)

    ff = np.where(np.sum(A_, axis=0) == 0)  # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating {} empty spatial components'.format(len(ff)))
        A_ = np.delete(A_, list(ff[ff < nr]), 1)
        C = np.delete(C, list(ff[ff < nr]), 0)
        nr = nr - len(ff[ff < nr])
        if low_rank_background:
            background_ff = list(filter(lambda i: i >= nb, ff - nr))
            f = np.delete(f, background_ff, 0)
        else:
            background_ff = list(filter(lambda i: i >= 0, ff - nr))
            f = np.delete(f, background_ff, 0)
            b_in = np.delete(b_in, background_ff, 1)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, dview=dview, block_size=block_size, num_blocks_per_run=num_blocks_per_run) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        # Y*f' - A*(C*f')
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    if update_background_components:

        if b_in is None:
            # update baseline based on residual
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
        else:
            ind_b = [np.where(_b)[0] for _b in b_in.T]
            b = HALS4shape_bckgrnd(Y_resf, b_in, f, ind_b)

    else:
        if b_in is None:
            raise Exception(
                'If you set update_background_components to False you have to pass the background b_in as input to update_spatial')
        # try:
        #    b = np.delete(b_in, background_ff, 0)
        # except NameError:
        b = b_in

    print(("--- %s seconds ---" % (time.time() - start_time)))
    try:  # clean up
        # remove temporary file created
        print("Removing tempfiles created")
        shutil.rmtree(folder)
    except Exception:
        raise Exception("Failed to delete: " + folder)

    return A_, b, C, f
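The per-pixel problem stated in the docstring is a sparse nonnegative regression, solved here via lasso_lars. Below is a hedged, minimal sketch of what one such regression can look like for a single pixel; solve_pixel and its penalty scaling are illustrative stand-ins, not the actual regression_ipyparallel code path:

import numpy as np
from sklearn.linear_model import LassoLars

def solve_pixel(y, Cf_px, sn_px):
    # y: one pixel's trace (length T); Cf_px: the temporal components
    # allowed at that pixel (k x T); sn_px: the pixel noise level
    T = y.size
    # penalty scaled by the noise level, standing in for the
    # sn(i)*sqrt(T) residual constraint of the BPDN formulation
    lam = sn_px * np.sqrt(T) / T
    model = LassoLars(alpha=lam, positive=True, fit_intercept=False)
    model.fit(Cf_px.T, y)
    return model.coef_  # one nonnegative weight per allowed component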
Example #52
0
def two_dim_findpeaks(image, peak_width=None, medfilt_radius=5, xc_filter=True, 
                      kill_edges=True, kill_duplicates=True):
        """
        Takes an image and detects the peaks using the local maximum filter:
        an intermediate boolean mask is 1 where a pixel equals the maximum
        of its neighborhood, 0 otherwise.

        Code by Ivan:
        http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array

        Returns a 2D numpy array with one row per peak and three columns
        (X index, Y index, peak height)
        """
        
        from scipy.ndimage import (maximum_filter, generate_binary_structure,
                                   binary_erosion, iterate_structure,
                                   gaussian_filter)
        
        from analyzarr.lib.cv.cv_funcs import xcorr

        if medfilt_radius is not None:
            image = medfilt(image, medfilt_radius)
            # blur it; sharp edges seem to mess up peak finding
            image = gaussian_filter(image, medfilt_radius)

        if peak_width is None:
            peak_width=estimate_peak_width(image,medfilt=medfilt_radius)
            
        if xc_filter:
            # the normal gaussian
            xg, yg = np.mgrid[0:peak_width, 0:peak_width]
            def gaussian(height, center_x, center_y, width_x, width_y):
                """Returns a gaussian function with the given parameters"""
                width_x = float(width_x)
                width_y = float(width_y)
                return lambda x,y: height*np.exp(
                            -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
            
            templateImage = gaussian(255, (peak_width/2)+1, (peak_width/2)+1, (peak_width/4)+1, 
                                    peak_width/4+1)(xg, yg)            
            cleaned_image = xcorr(templateImage, image)
        else:
            cleaned_image=image
        #medfilt(image, medfilt_radius)
        #peak_width=estimate_peak_width(cleaned_image,medfilt=None, 
        #                               max_peak_width=max_peak_width)        
            
        # define an 8-connected neighborhood
        neighborhood = generate_binary_structure(2,1)
        neighborhood = iterate_structure(neighborhood, int(peak_width/4))
    
        #apply the local maximum filter; all pixel of maximal value 
        #in their neighborhood are set to 1
        local_max = maximum_filter(cleaned_image, footprint=neighborhood)==cleaned_image
        #local_max is a mask that contains the peaks we are 
        #looking for, but also the background.
        #In order to isolate the peaks we must remove the background from the mask.
    
        #we create the mask of the background
        background = (cleaned_image==0)
    
        #a little technicality: we must erode the background in order to 
        #successfully subtract it from local_max, otherwise a line will 
        #appear along the background border (artifact of the local maximum filter)
        eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    
        #we obtain the final mask, containing only peaks, 
        #by removing the background from the local_max mask
        detected_peaks = local_max & ~eroded_background
        
        # convert the mask to indices:
        detected_peaks = detected_peaks.nonzero()
        
        # format the two arrays into one
        detected_peaks = np.vstack((detected_peaks[0],detected_peaks[1])).T
        
        if kill_duplicates:
            detected_peaks=_kill_duplicates(detected_peaks)
        if kill_edges:
            detected_peaks=_kill_edges(image, detected_peaks, peak_width/8)
        
        # translate to actual image peak, instead of cross correlation peak
        if xc_filter:
            detected_peaks=detected_peaks+peak_width/2
        
        # get peak heights as third column
        heights = np.array([image[pk[0],pk[1]] for pk in detected_peaks])
        peaks = np.hstack((detected_peaks, heights.reshape((-1,1))))
    
        return peaks
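For a self-contained view of the masking trick used above (with the deprecated boolean subtraction replaced by a logical AND), here is a small synthetic demonstration:

import numpy as np
from scipy.ndimage import (maximum_filter, binary_erosion,
                           generate_binary_structure, iterate_structure)

# two bright blobs on a zero background
img = np.zeros((20, 20))
img[5, 5] = 3.0
img[14, 12] = 2.0

neighborhood = iterate_structure(generate_binary_structure(2, 1), 2)
local_max = maximum_filter(img, footprint=neighborhood) == img
background = (img == 0)
# erode the background so the border of the background region does not
# survive as a spurious line of "peaks"
eroded_background = binary_erosion(background, structure=neighborhood,
                                   border_value=1)
peaks = local_max & ~eroded_background  # boolean AND, not subtraction

print(np.argwhere(peaks))  # [[ 5  5] [14 12]]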
def update_spatial_components(Y, C, f, A_in, d1=None, d2=None, min_size=3, max_size=8,
                              dist=3, sn=None, use_parallel=False, method='ellipse',
                              expandCore=iterate_structure(generate_binary_structure(2, 1), 2).astype(int)):
    #% set variables
    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    Y=np.atleast_2d(Y)
    if Y.shape[1]==1:
        raise Exception('Dimension of Matrix Y must be pixels x time')
    
    C=np.atleast_2d(C)
    if C.shape[1]==1:
        raise Exception('Dimension of Matrix C must be neurons x time')
    
    f=np.atleast_2d(f)
    if f.shape[1]==1:
         raise Exception('Dimension of Matrix f must be neurons x time ')
        
    if len(A_in.shape)==1:
        A_in=np.atleast_2d(A_in).T

    if A_in.shape[0]==1:
         raise Exception('Dimension of Matrix A must be pixels x neurons ')
    
    
    Cf = np.vstack((C,f))
    
    start_time = time.time()
    [d,T] = np.shape(Y)
    nr,_ = np.shape(C)       # number of neurons
    
    IND = determine_search_location(A_in,d1,d2,method = method, min_size = min_size, max_size = max_size, dist = dist, expandCore = expandCore)
    
    IND=list(IND) # indices per pixel
    
    Y_ = list(Y) # list per pixel
    
    ind2_ =[ np.hstack( (np.where(iid_)[0] , nr+np.arange(f.shape[0])) )   if  np.size(np.where(iid_)[0])>0  else [] for iid_ in IND]

    Cf_=[Cf[idx_,:] for idx_ in ind2_]

    #% LARS regression 
    A_ = np.hstack((np.zeros((d,nr)),np.zeros((d,np.size(f,0)))))
    
    if use_parallel:
        raise Exception('NOT IMPLEMENTED YET')
        #import multiprocessing as mp    
        #pool = mp.Pool(processes=8)
        #results = [pool.apply(basis_pursuit_denoising, args=(x.T,y.T,z)) for x,y,z in zip(Y_,Cf_,sn)]
        #print(results)
    else:
        for c,y,s,id2_,px in zip(Cf_,Y_,sn,ind2_,range(d)):
            if px%1000==0:
                print(px)
            if np.size(c)>0:                
                _, _, a, _ , _= lars_regression_noise(y, np.array(c.T), 1, sn[px]**2*T)
                if np.isscalar(a):
                    A_[px,id2_]=a
                else:
                    A_[px,id2_]=a.T
              
                
                
    
    #%
    print('Updated Spatial Components')
    A_=threshold_components(A_, d1, d2)
    ff = np.where(np.sum(A_,axis=0)==0)           # remove empty components
    if np.size(ff)>0:
        ff = ff[0]
        warn('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_,list(ff),1)
        C = np.delete(C,list(ff),0)
#        raise Exception('Eliminated empty component. Reduce number of neurons')
        
    Y_res = Y - np.dot(A_[:,:nr],C[:nr,:])
    A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update baseline based on residual
    b = A_bas
    A_ = A_[:,:nr]    
            
    A_=coo_matrix(A_)
    print("--- %s seconds ---" % (time.time() - start_time))
    return A_,b,C
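The closing baseline update (A_bas = np.fmax(np.dot(Y_res, f.T)/scipy.linalg.norm(f)**2, 0)) is a closed-form least-squares projection of the residual onto the background trace f, clipped at zero. A small synthetic check of that step (all names below are made up for the illustration):

import numpy as np

rng = np.random.default_rng(0)
d, T = 50, 200
f = np.abs(rng.standard_normal((1, T)))        # background trace
b_true = np.abs(rng.standard_normal((d, 1)))   # true spatial background
Y_res = b_true @ f + 0.01 * rng.standard_normal((d, T))

# project the residual movie onto f and clip negatives (nonnegativity)
b_hat = np.fmax(Y_res @ f.T / np.linalg.norm(f) ** 2, 0)
print(np.allclose(b_hat, b_true, atol=0.05))   # True: least-squares recovery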
Example #54
0
def update_spatial_components(Y, C, f, A_in, sn=None, d1=None, d2=None, min_size=3, max_size=8, dist=3, 
                              method='ellipse', expandCore=None, backend='single_thread', n_processes=4, n_pixels_per_process=128 ):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D)
        movie, raw data in 2D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints; used to determine the search locations.

    d1: [optional] int
        x movie dimension

    d2: [optional] int
        y movie dimension

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int


    sn: [optional] float
        noise associated with each pixel if known

    n_processes: [optional] int
        number of threads to use when the backend is multiprocessing, threading, or ipyparallel

    backend: [optional] str
        'ipyparallel', 'single_thread'
        single_thread: no parallelization; it can be used with small datasets
        ipyparallel: uses ipython clusters and sends jobs to each of them


    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion


    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    """
    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    # check the string case first: a filename has no ndim attribute
    if not isinstance(Y, str) and Y.ndim < 2:
        Y = np.atleast_2d(Y)
        
    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    C = np.atleast_2d(C)
    if C.shape[1] == 1:
        raise Exception('Dimension of Matrix C must be neurons x time')

    f = np.atleast_2d(f)
    if f.shape[1] == 1:
        raise Exception('Dimension of Matrix f must be neurons x time ')

    if len(A_in.shape) == 1:
        A_in = np.atleast_2d(A_in).T

    if A_in.shape[0] == 1:
        raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    Cf = np.vstack((C, f))  # create matrix that include background components

    [d, T] = np.shape(Y)    

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    nr, _ = np.shape(C)       # number of neurons
    
    IND = determine_search_location(
        A_in, d1, d2, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore)
    print " find search location"


    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    folder = tempfile.mkdtemp()

    # use the ipyparallel package, you need to start a cluster server
    # (ipcluster command) in order to use it
    if backend == 'ipyparallel':

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if isinstance(Y, np.memmap):  # if input file is already memory mapped then find the filename
            Y_name = Y.filename            
        # if not create a memory mapped version (necessary for parallelization)
        elif type(Y) is str:
            Y_name = Y            
        else:
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)            
            Y,_,_,_=load_memmap(Y_name)    

        # create arguments to be passed to the function. Here we are grouping
        # bunch of pixels to be processed by each thread
        pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
                        for i in range(0, d1 * d2 - n_pixels_per_process + 1, n_pixels_per_process)]
        # include leftover pixels that do not fill a complete group
        if (d1 * d2) % n_pixels_per_process != 0:
            last_full = (d1 * d2) - (d1 * d2) % n_pixels_per_process
            pixel_groups.append((Y_name, C_name, sn, ind2_, range(last_full, d1 * d2)))

        A_ = np.zeros((d, nr + np.size(f, 0)))
    
        try:  # if server is not running and raise exception if not installed or not started
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the installation of the ipyparallel (pip install ipyparallel) package and  starting a cluster (type ipcluster start -n 6) where 6 is the number of nodes"
            raise

        if len(c) < n_processes:
            print(len(c))
            raise Exception(
                "the number of nodes in the cluster is less than the required processes: decrease the n_processes parameter to a suitable value")

        dview = c[:n_processes]  # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)                        
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups)
        # clean up
       
        
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
        
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()

    elif backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print(px)
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1, sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread, threading, multiprocessing or ipyparallel')
    
    #%
    print('Updated Spatial Components')
   
    A_ = threshold_components(A_, d1, d2)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)
    

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)
    
#    import pdb 
#    pdb.set_trace()
    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update
    # baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove temporary file created
        print "Remove temporary file created"
        shutil.rmtree(folder)

    except Exception:

        raise Exception("Failed to delete: " + folder)

    return A_, b, C
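The ipyparallel branch above passes workers a filename rather than the movie itself, so each worker memory-maps Y on demand. A hedged sketch of that idea with plain numpy memmaps (the real code relies on its own load_memmap helper; worker and the shapes here are invented for the illustration):

import numpy as np
import os
import tempfile

folder = tempfile.mkdtemp()
Y = np.random.rand(100, 50).astype(np.float32)

# the parent writes the movie to disk once
Y_name = os.path.join(folder, 'Y_temp.mmap')
Ym = np.memmap(Y_name, dtype=np.float32, shape=Y.shape, mode='w+')
Ym[:] = Y
Ym.flush()

def worker(args):
    # workers reopen the file read-only instead of receiving the
    # array through pickling, touching only their own pixel rows
    y_name, pixels = args
    Yr = np.memmap(y_name, dtype=np.float32, shape=(100, 50), mode='r')
    return Yr[pixels].sum(axis=1)

print(worker((Y_name, list(range(10)))).shape)  # (10,)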
Example #55
0
def update_spatial_components(Y, C, f, A_in, d1=None, d2=None, min_size=3, max_size=8,
                              dist=3, sn=None, method='ellipse', expandCore=None):
    """update spatial footprints and background     
    through Basis Pursuit Denoising

    for each pixel i solve the problem 
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to 
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);
    
    for each pixel the search is limited to a few spatial components
    
    Parameters
    ----------   
    Y: np.ndarray (2D)
        movie, raw data in 2D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron. 
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints; used to determine the search locations.    
        
    d1: [optional] int
        x movie dimension
    d2: [optional] int
        y movie dimension
    min_size: [optional] int
        
    max_size: [optional] int
        
    dist: [optional] int
        
    sn: [optional] float
        
    n_processes: [optional] int
        
    method: [optional] string
        
    expandCore: [optional]  scipy.ndimage.morphology
        

    Returns
    --------
    
    A: np.ndarray        
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray        
         temporal components (updated only when spatial components are completely removed)             
       
    """
    if expandCore is None:
        expandCore=iterate_structure(generate_binary_structure(2,1), 2).astype(int)
    
    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    Y=np.atleast_2d(Y)
    if Y.shape[1]==1:
        raise Exception('Dimension of Matrix Y must be pixels x time')
    
    C=np.atleast_2d(C)
    if C.shape[1]==1:
        raise Exception('Dimension of Matrix C must be neurons x time')
    
    f=np.atleast_2d(f)
    if f.shape[1]==1:
         raise Exception('Dimension of Matrix f must be neurons x time ')
        
    if len(A_in.shape)==1:
        A_in=np.atleast_2d(A_in).T

    if A_in.shape[0]==1:
         raise Exception('Dimension of Matrix A must be pixels x neurons ')
    
    start_time = time.time()

    
    Cf = np.vstack((C,f))
        
    [d,T] = np.shape(Y)

    nr,_ = np.shape(C)       # number of neurons

    
    IND = determine_search_location(A_in,d1,d2,method = method, min_size = min_size, max_size = max_size, dist = dist, expandCore = expandCore)
    
    IND=list(IND) # indices per pixel
    
    Y_ = list(Y) # list per pixel
    
    ind2_ =[ np.hstack( (np.where(iid_)[0] , nr+np.arange(f.shape[0])) )   if  np.size(np.where(iid_)[0])>0  else [] for iid_ in IND]

    Cf_=[Cf[idx_,:] for idx_ in ind2_]

    #% LARS regression 
    A_ = np.hstack((np.zeros((d,nr)),np.zeros((d,np.size(f,0)))))


    for c,y,s,id2_,px in zip(Cf_,Y_,sn,ind2_,range(d)):
        if px%1000==0:
            print(px)
        if np.size(c)>0:                
            _, _, a, _ , _= lars_regression_noise(y, np.array(c.T), 1, sn[px]**2*T)
            if np.isscalar(a):
                A_[px,id2_]=a
            else:
                A_[px,id2_]=a.T
          
                
                
    
    #%
    print('Updated Spatial Components')
    A_=threshold_components(A_, d1, d2)
    ff = np.where(np.sum(A_,axis=0)==0)           # remove empty components
    if np.size(ff)>0:
        ff = ff[0]
        warn('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_,list(ff),1)
        C = np.delete(C,list(ff),0)
#        raise Exception('Eliminated empty component. Reduce number of neurons')
        
    Y_res = Y - np.dot(A_[:,:nr],C[:nr,:])
    A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update baseline based on residual
    b = A_bas
    A_ = A_[:,:nr]    
            
    A_=coo_matrix(A_)
    print("--- %s seconds ---" % (time.time() - start_time))
    return A_,b,C
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    logger.info('Executing weighted viscous morphology with {} ({} bins).'.format(','.join(map(str, args.func)), len(args.func)))
        
    # iterate over input images
    for image in args.images:
        
        # build output file name
        image_viscous_name = args.folder + '/' + image.split('/')[-1][:-4] + '_wviscous_' + '_'.join(map(str, args.func))
        image_viscous_name += image.split('/')[-1][-4:]
        
        # check if output file exists
        if not args.force:
            if os.path.exists(image_viscous_name):
                logger.warning('The output file {} already exists. Skipping this image.'.format(image_viscous_name))
                continue
        
        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)
        
        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())
        
        # prepare result image and extract required attributes of input image
        if args.debug:
            logger.debug('Intensity range of gradient image is ({}, {})'.format(image_gradient_data.min(), image_gradient_data.max()))
        
        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, len(args.func))
        logger.debug('{} bins created'.format(len(bins) - 1))
        
        # check if the number of bins is consistent
        if len(args.func) != len(bins) - 1:
            raise Exception('Inconsistency between the number of requested and created bins ({} to {})'.format(len(args.func), len(bins) - 1))
        
        # prepare result file
        image_viscous_data = image_gradient_data
        
        # transform the gradient images topography
        logger.info('Applying the viscous morphological operations on {} sections...'.format(len(args.func)))
        for sl in range(1, len(args.func) + 1):
            
            # create sphere to use in this step
            if 0 >= args.func[sl - 1]: continue # sphere of sizes 0 or below lead to no changes and are not executed
            sphere = iterate_structure(generate_binary_structure(3, 1), args.func[sl - 1]).astype(scipy.int_)
            
            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[sl]) # all voxels which are over the current slice
            mask_lower = (image_gradient_data < bins[sl - 1]) # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower) # all voxels in the current slice
            
            # extract slice
            image_threshold_data = image_gradient_data.copy()
            image_threshold_data[mask_lower] = 0 # set all voxels under the current slice to zero
            image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max() # set all voxels over the current slice to the max of all voxels in the current slice
            
            logger.debug('{} of {} voxels belong to this level.'.format(len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)))            
            
            # apply the closing with the appropriate sphere
            logger.debug('Applying a disk of {} to all values >= {} and < {} (sec {})...'.format(args.func[sl - 1], bins[sl - 1],  bins[sl], sl))
            image_closed_data = grey_closing(image_threshold_data, footprint=sphere)
            
            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)
                    
        # save resulting gradient image
        logger.info('Saving resulting gradient image as {}...'.format(image_viscous_name))
        image_viscous = image_like(image_viscous_data, image_gradient)
        save(image_viscous, image_viscous_name)
            
    logger.info('Successfully terminated.')
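Each grey_closing pass fills dark structures narrower than the current sphere while leaving larger ones alone. A minimal 2D illustration of that effect (2D for brevity; the script itself builds 3D spheres):

import numpy as np
from scipy.ndimage import grey_closing, generate_binary_structure, iterate_structure

img = 100.0 * np.ones((9, 9))
img[4, 4] = 0.0  # a one-pixel dark pit in a bright region

# structuring element built the same way as the per-section spheres
disk = iterate_structure(generate_binary_structure(2, 1), 1).astype(int)
closed = grey_closing(img, footprint=disk)
print(img[4, 4], closed[4, 4])  # 0.0 100.0: the pit is filled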
Example #57
0
def determine_search_location(A, d1, d2, method = 'ellipse', min_size = 3, max_size = 8, dist = 3, expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int)):

    from scipy.ndimage import grey_dilation
    from scipy.sparse import coo_matrix, issparse

    d, nr = np.shape(A)
    
    A = csc_matrix(A)
        
    IND = False*np.ones((d,nr))
    if method == 'ellipse':
        Coor = dict()
        Coor['x'] = np.kron(np.ones((d2, 1)), np.expand_dims(range(d1), axis=1))
        Coor['y'] = np.kron(np.expand_dims(range(d2), axis=1), np.ones((d1, 1)))
        if not dist == np.inf:            # determine search area for each neuron
            cm = np.zeros((nr, 2))        # vector for center of mass
            Vr = []                       # cell(nr,1)
            IND = []                      # indicator for distance
            cm[:, 0] = np.dot(Coor['x'].T, A[:, :nr].todense()) / A[:, :nr].sum(axis=0)
            cm[:, 1] = np.dot(Coor['y'].T, A[:, :nr].todense()) / A[:, :nr].sum(axis=0)
            for i in range(nr):            # calculation of variance for each component and construction of ellipses
                dist_cm=coo_matrix(np.hstack((Coor['x'] - cm[i,0], Coor['y'] - cm[i,1])))            
                Vr.append(dist_cm.T*spdiags(A[:,i].toarray().squeeze(),0,d,d)*dist_cm/A[:,i].sum(axis=0))        
                D,V=eig(Vr[-1])        
                d11 = np.min((max_size**2,np.max((min_size**2,D[0].real))))
                d22 = np.min((max_size**2,np.max((min_size**2,D[1].real))))
                IND.append(np.sqrt((dist_cm*V[:,0])**2/d11 + (dist_cm*V[:,1])**2/d22)<=dist)       # search indexes for each component

            IND=(np.asarray(IND)).squeeze().T
        else:
            IND = True*np.ones((d,nr))
    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:,i].todense(),(d2,d1))
            if len(expandCore)> 0:
                A_temp = grey_dilation(A_temp, footprint = expandCore)
            else:
                A_temp = grey_dilation(A_temp, (1,1))            
            
#            A_temp = grey_dilation(A_temp, footprint = expandCore)
            IND[:,i] = np.squeeze(np.reshape(A_temp,(d,1)))>0
    else:
        IND = True*np.ones((d,nr))
            
    return IND
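For reference, the 'ellipse' branch computes each component's center of mass and weighted second-moment matrix, clips the eigenvalues to [min_size**2, max_size**2], and keeps pixels whose normalized distance along the eigenvectors is at most dist. A self-contained sketch of that test for one component (ellipse_search_mask and its meshgrid layout are assumptions for illustration, not the function above):

import numpy as np

def ellipse_search_mask(a, d1, d2, min_size=3, max_size=8, dist=3):
    # a: one component's footprint as a (d1, d2) array
    x, y = np.meshgrid(np.arange(d2), np.arange(d1))
    coords = np.stack([y.ravel(), x.ravel()], axis=1).astype(float)
    w = a.ravel() / a.sum()                       # weights from the footprint
    cm = w @ coords                               # center of mass
    diff = coords - cm
    V2 = (diff * w[:, None]).T @ diff             # weighted second moment
    D, V = np.linalg.eigh(V2)
    D = np.clip(D, min_size ** 2, max_size ** 2)  # clip the ellipse axes
    proj = diff @ V                               # rotate onto the eigen-axes
    return np.sqrt((proj ** 2 / D).sum(axis=1)) <= dist

a = np.zeros((16, 16))
a[6:9, 6:10] = 1.0
print(int(ellipse_search_mask(a, 16, 16).sum()))  # pixels inside the ellipse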
Example #58
0
def chooseAnchors(spec, method):
    stime=time.time()
    if method == 1:
        # original
        spec *= spec
        cutoff = args.anchorThresh * np.std(spec)
        
        t3=np.where(spec<cutoff)
        t4=np.where(spec>=cutoff)
        spec[t3]=0
        print "cutoff val:",cutoff
        print "below cutoff:",len(t3[0])
        print "over cutoff:",len(t4[0])

        tmp=np.delete(spec.flatten(),np.where(spec.flatten()<cutoff))
        
        if args.showPlots:
            h = np.histogram(tmp,bins=100)
            pylab.bar(h[1][1:],h[0])
            pylab.autoscale()
            pylab.show()
        
        # return anchor points
        log.info("ChooseAnchor(1) time: %fs" % (time.time()-stime))
        tmp[np.where(tmp>0)]=1
        return tmp
    elif method == 2:
        # non squared (Better)
        cutoff = args.anchorThresh * np.std(spec)
        
        # swing everything positive
        spec = abs(spec)
        
        belowCut=np.where(spec<cutoff)
        aboveCut=np.where(spec>=cutoff)
        spec[belowCut]=0
        print "cutoff val:",cutoff
        #print "below cutoff:",len(belowCut[0])
        #print "over cutoff(Anchors):",len(aboveCut[0])
        
        if args.showPlots:
            spec2 = spec.copy()
            spec2[np.where(spec2>0)] = np.log(spec2[np.where(spec2>0)])
            img = pylab.imshow(np.transpose(spec2))
            pylab.colorbar(img)
            pylab.show()
        
        print(spec.shape)
        #tmp=np.delete(spec,np.where(spec<cutoff))
        spec[np.where(spec<cutoff)] = 0
        spec[np.where(spec>=cutoff)] = 1
        #print tmp.flatten().shape
        #print np.min(tmp), np.max(tmp)
        
        if args.showPlots:
            print "Final distribution of Anchors:"
            h = np.histogram(tmp,bins=100)
            pylab.bar(h[1][1:],h[0])
            pylab.autoscale()
            pylab.show()
        
        # return anchor points
        #tmp[np.where(tmp>0)]=1
        log.info("ChooseAnchor(2) time: %fs" % (time.time()-stime))
        return spec
    elif method==3:
        # method as described in github worldveil/dejavu/fingerprint.py
        # generate binary mask
        binMask = generate_binary_structure(2,1)
        grownBinMask = iterate_structure(binMask, args.footprintSize)
        
        max_filt = maximum_filter(spec, footprint=grownBinMask)  # avoid shadowing builtin filter
        local_max = max_filt == spec
        background = (spec == 0)
        eroded_background = binary_erosion(background, structure=grownBinMask,
                                       border_value=1)

        # Boolean mask of arr2D with True at peaks
        detected_peaks = local_max & ~eroded_background
        return detected_peaks.astype(int)
    else:
        print "you fail"
        exit()