Example #1
def __sliding_window_fast(array: np.ndarray,
                          window_size: int,
                          mode: str = "attack") -> np.ndarray:
    if mode == "attack":
        window_size = make_odd(window_size)
        return maximum_filter1d(array, size=(2 * window_size - 1))
    half_window_size = (window_size - 1) // 2
    array = np.pad(array, (half_window_size, 0))
    return maximum_filter1d(array, size=window_size)[:-half_window_size]
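A quick way to see what the "release" branch computes: left-padding by half the window and trimming the tail turns scipy's centered maximum filter into a trailing (causal) window. A minimal sketch, assuming only numpy and scipy:

import numpy as np
from scipy.ndimage import maximum_filter1d

x = np.array([0., 1., 0., 0., 3., 0., 0.])
window_size = 5
half = (window_size - 1) // 2
padded = np.pad(x, (half, 0))
# each output sample is the max over the current and preceding window_size - 1 samples
print(maximum_filter1d(padded, size=window_size)[:-half])  # [0. 1. 1. 1. 3. 3. 3.]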
Example #2
def prominent_peaks(image, min_xdistance, min_ydistance, threshold=None):
    img = image.copy()
    rows, cols = img.shape
    if threshold is None:
        threshold = 0.5 * np.max(img)
    yc_size = 2 * min_ydistance + 1
    xc_size = 2 * min_xdistance + 1
    img_max = maximum_filter1d(img,
                               size=yc_size,
                               axis=0,
                               mode='constant',
                               cval=0)
    img_max = maximum_filter1d(img_max,
                               size=xc_size,
                               axis=1,
                               mode='constant',
                               cval=0)
    mask = (img == img_max)
    img *= mask
    img_t = img > threshold

    label_img = measure.label(img_t)
    props = measure.regionprops(label_img, img_max)
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)
    yc_peaks = []
    xc_peaks = []

    yc_ext, xc_ext = np.mgrid[-min_ydistance:min_ydistance + 1,
                              -min_xdistance:min_xdistance + 1]
    for yc_idx, xc_idx in coords:
        accum = img_max[yc_idx, xc_idx]
        if accum > threshold:
            yc_nh = yc_idx + yc_ext
            xc_nh = xc_idx + xc_ext

            yc_in = np.logical_and(yc_nh > 0, yc_nh < rows)
            yc_nh = yc_nh[yc_in]
            xc_nh = xc_nh[yc_in]

            xc_low = xc_nh < 0
            yc_nh[xc_low] = rows - yc_nh[xc_low]
            xc_nh[xc_low] += cols
            xc_high = xc_nh >= cols
            yc_nh[xc_high] = rows - yc_nh[xc_high]
            xc_nh[xc_high] -= cols

            img_max[yc_nh, xc_nh] = 0
            yc_peaks.append(yc_idx)
            xc_peaks.append(xc_idx)

    return np.transpose(np.vstack(
        (np.array(xc_peaks), np.array(yc_peaks)))).astype(int)
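A minimal smoke test for prominent_peaks, assuming the function above and its imports (numpy, scipy.ndimage's maximum_filter1d, skimage's measure) are in scope:

import numpy as np

img = np.zeros((50, 50))
img[10, 10] = 5.0   # strong peak
img[40, 30] = 4.0   # weaker peak
# both peaks come back as (x, y) pairs, strongest first: [[10 10], [30 40]]
print(prominent_peaks(img, min_xdistance=5, min_ydistance=5))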
Example #3
    def _find_peak_locations_fn(logits, size, threshold):

        assert size % 2 == 1
        assert logits.ndim == 2 and logits.shape[1] == 88
        logits_max = maximum_filter1d(logits,
                                      size=size,
                                      axis=0,
                                      mode='constant')
        assert logits_max.shape == logits.shape
        logits_peak_ids = np.logical_and(logits == logits_max,
                                         logits > threshold)

        # Due to numerical precision, there could be multiple peaks within a window.
        # In this case, we use the first peak and remove the other peaks.
        num_frames = len(logits)
        hs = (size - 1) // 2
        for pitch in range(88):
            for frame_idx in range(num_frames - hs):
                if logits_peak_ids[frame_idx, pitch]:
                    for fidx in range(hs):
                        if logits_peak_ids[frame_idx + 1 + fidx, pitch]:
                            logits_peak_ids[frame_idx + 1 + fidx,
                                            pitch] = False

        logits_peak_ids = np.where(logits_peak_ids)

        return logits_peak_ids
Example #4
def preprocessFeatureMat(X, Lfilt):
	"""
	Binarizes and blurs the feature matrix

	Parameters
	----------
	X : 2D array
		The original feature matrix (presumably output by buildFeatureMat())
	Lfilt : int
		The width of the hamming filter used to blur the feature matrix.

	Returns
	-------
	X : 2D array
		The modified feature matrix without blur
	Xblur : 2D array
		The modified feature matrix with blur
	"""
	Xblur = _filterRows(X, Lfilt)

	# ensure that the maximum value in Xblur is 1; we do this by dividing
	# by the largest value within Lfilt / 2, rather than just clamping, so
	# that there's a smooth dropoff as you move away from dense groups of
	# 1s in X; otherwise it basically ends up max-pooled
	maxima = filters.maximum_filter1d(Xblur, Lfilt // 2, axis=1, mode='constant')
	Xblur[maxima > 0] /= maxima[maxima > 0]

	# have columns be adjacent in memory
	return np.asfortranarray(X), np.asfortranarray(Xblur)
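_filterRows is not shown here. A plausible stand-in, based only on the docstring (a hypothetical helper that blurs each row with a Hamming window of width Lfilt, scaled so its peak is 1):

import numpy as np

def _filterRows(X, Lfilt):
    # hypothetical: blur each row with a max-normalized Hamming window
    filt = np.hamming(Lfilt)
    filt /= filt.max()
    return np.vstack([np.convolve(row, filt, mode='same') for row in X])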
Example #5
def dff(C, sig_baseline=10, win_baseline=300, sig_output=3, method='maximin'):
    """
    delta F / F using the maximin method from Suite2P
    inputs: C - neuropil-subtracted fluorescence (neurons x timepoints)
    outputs: dFF - neurons x timepoints

    :param C:
    :param sig_baseline:
    :param win_baseline:
    :param sig_output:
    :param method:
    :return:
    """

    if method == 'maximin':  # windowed baseline estimation
        flow = filters.gaussian_filter(C, [0, sig_baseline])
        flow = filters.minimum_filter1d(flow, win_baseline, axis=1)
        flow = filters.maximum_filter1d(flow, win_baseline, axis=1)
    else:
        flow = None
        raise NotImplementedError

    C -= flow  # subtract baseline (dF)
    C /= flow  # divide by baseline (dF/F)
    return filters.gaussian_filter(C, [0, sig_output])  # smooth result
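The 'maximin' step in isolation: a Gaussian smooth followed by a rolling minimum and then a rolling maximum of the same width tracks the lower envelope of a drifting trace. A self-contained sketch with illustrative sizes:

import numpy as np
from scipy.ndimage import gaussian_filter1d, minimum_filter1d, maximum_filter1d

t = np.linspace(0, 10, 1000)
trace = 1.0 + 0.1 * t + np.exp(-(t - 5) ** 2)   # drifting baseline plus one transient
smoothed = gaussian_filter1d(trace, sigma=10)
baseline = maximum_filter1d(minimum_filter1d(smoothed, 300), 300)
dff = (trace - baseline) / baseline
print(round(float(dff.max()), 2))  # the transient stands out against a near-zero baseline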
Example #6
def _rolling_nanmax_1d(a, w=None):
    """
    Compute the rolling max for 1-D while ignoring NaNs.

    This essentially replaces:

        `np.nanmax(rolling_window(T[..., start:stop], m), axis=T.ndim)`

    Parameters
    ----------
    a : ndarray
        The input array

    w : int, default None
        The rolling window size

    Returns
    -------
    output : ndarray
        Rolling window nanmax.
    """
    if w is None:
        w = a.shape[0]

    half_window_size = int(math.ceil((w - 1) / 2))
    return maximum_filter1d(a, size=w)[half_window_size:half_window_size +
                                       a.shape[0] - w + 1]
Example #7
    def detect(self, threshold, combine=30, pre_avg=100, pre_max=30,
               post_avg=30, post_max=70, delay=0):
        """
        Detects the onsets.

        :param threshold: threshold for peak-picking
        :param combine:   only report 1 onset for N milliseconds
        :param pre_avg:   use N milliseconds of past information for the moving average
        :param pre_max:   use N milliseconds of past information for the moving maximum
        :param post_avg:  use N milliseconds of future information for the moving average
        :param post_max:  use N milliseconds of future information for the moving maximum
        :param delay:     report the onset N milliseconds delayed

        In online mode, post_avg and post_max are set to 0.

        Implements the peak-picking method described in:

        "Evaluating the Online Capabilities of Onset Detection Methods"
        Sebastian Böck, Florian Krebs and Markus Schedl
        Proceedings of the 13th International Society for Music Information
        Retrieval Conference (ISMIR), 2012

        """
        # online mode?
        if self.online:
            post_max = 0
            post_avg = 0
        # convert timing information to frames
        pre_avg = int(round(self.fps * pre_avg / 1000.))
        pre_max = int(round(self.fps * pre_max / 1000.))
        post_max = int(round(self.fps * post_max / 1000.))
        post_avg = int(round(self.fps * post_avg / 1000.))
        # convert to seconds
        combine /= 1000.
        delay /= 1000.
        # init detections
        self.detections = []
        # moving maximum
        max_length = pre_max + post_max + 1
        max_origin = int(np.floor((pre_max - post_max) / 2))
        mov_max = maximum_filter1d(self.activations, max_length,
                                   mode='constant', origin=max_origin)
        # moving average
        avg_length = pre_avg + post_avg + 1
        avg_origin = int(np.floor((pre_avg - post_avg) / 2))
        mov_avg = uniform_filter1d(self.activations, avg_length,
                                   mode='constant', origin=avg_origin)
        # detections are activations equal to the moving maximum
        detections = self.activations * (self.activations == mov_max)
        # detections must be greater than or equal to the moving average + threshold
        detections = detections * (detections >= mov_avg + threshold)
        # convert detected onsets to a list of timestamps
        last_onset = 0
        for i in np.nonzero(detections)[0]:
            onset = float(i) / float(self.fps) + delay
            # only report an onset if none was reported within the last N milliseconds
            if onset > last_onset + combine:
                self.detections.append(onset)
                # save last reported onset
                last_onset = onset
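The core of the method, stripped of the class: a positive origin shifts scipy's filter window toward the past, which is how the asymmetric pre/post context is realized. A toy check:

import numpy as np
from scipy.ndimage import maximum_filter1d, uniform_filter1d

act = np.array([0., .1, .9, .2, .1, .8, .1, 0.])
pre_max, post_max = 2, 1
length = pre_max + post_max + 1
origin = int(np.floor((pre_max - post_max) / 2))
mov_max = maximum_filter1d(act, length, mode='constant', origin=origin)
mov_avg = uniform_filter1d(act, length, mode='constant', origin=origin)
print(np.nonzero((act == mov_max) & (act >= mov_avg + 0.3))[0])  # [2 5]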
Example #8
def preprocessFeatureMat(X, Lfilt):
    """
	Binarizes and blurs the feature matrix

	Parameters
	----------
	X : 2D array
		The original feature matrix (presumably output by buildFeatureMat())
	Lfilt : int
		The width of the hamming filter used to blur the feature matrix.

	Returns
	-------
	X : 2D array
		The modified feature matrix without blur
	Xblur : 2D array
		The modified feature matrix with blur
	"""
    Xblur = _filterRows(X, Lfilt)

    # ensure that the maximum value in Xblur is 1; we do this by dividing
    # by the largest value within Lfilt / 2, rather than just clamping, so
    # that there's a smooth dropoff as you move away from dense groups of
    # 1s in X; otherwise it basically ends up max-pooled
    maxima = filters.maximum_filter1d(Xblur,
                                      Lfilt // 2,
                                      axis=1,
                                      mode='constant')
    Xblur[maxima > 0] /= maxima[maxima > 0]

    # have columns be adjacent in memory
    return np.asfortranarray(X), np.asfortranarray(Xblur)
Example #9
def Undersampled_Lip_Tragectory(phrase, Sleep_Time):
    A = "espeak -z -s 100 -v female5 -w test.wav "
    A = A + "'" + phrase + "'"
    #os.system("espeak -z -s 80 -v female5 -w test.wav 'Hey, why no one is looking at me? I feel neglected. I feel it! I am afraid!' ")
    os.system(A)
    samplerate, data = wavfile.read('test.wav')
    dt = 1 / float(samplerate)
    times = np.arange(len(data)) / float(samplerate)
    N = len(times)
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10
    Amplitude = max_Amplitude * (max_data / float(np.max(max_data)))
    n = Sleep_Time * samplerate
    Amp = []
    T = []
    i = 0
    while (i * n < N):
        Amp.append(Amplitude[int(i * n)])
        T.append(times[int(i * n)])
        i = i + 1
    Amp = np.array(Amp)
    T = np.array(T)
    '''
    plt.figure(1)
    plt.suptitle(phrase)
    plt.subplot(211)
    plt.plot(times,data)
    plt.plot(times,max_data,'r')
    plt.subplot(212)
    plt.plot(times,Amplitude)
    plt.plot(T,Amp,'r*')
    plt.show()
    '''
    return Amp, T
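The envelope extraction used here works on any waveform, not just espeak output. A stand-alone sketch with synthetic audio (gaussian_filter1d is the 1-D equivalent of the gaussian_filter call above):

import numpy as np
from scipy.ndimage import maximum_filter1d, gaussian_filter1d

samplerate = 8000
t = np.arange(samplerate) / samplerate
data = np.sin(2 * np.pi * 220 * t) * np.sin(np.pi * t)  # a one-second "syllable"
envelope = gaussian_filter1d(maximum_filter1d(data, size=1000), sigma=100)
amplitude = 10 * envelope / envelope.max()
print(amplitude.max())  # 10.0, the mouth opening at the loudest point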
Example #10
def Undersampled_Lip_Tragectory(phrase, Sleep_Time):
    A = "espeak -z -s 80 -v female5 -w test.wav "
    A = A + "'" + phrase + "'"
    os.system(A)
    samplerate, data = wavfile.read('test.wav')
    dt = 1 / float(samplerate)
    times = np.arange(len(data)) / float(samplerate)
    N = len(times)
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10
    Amplitude = max_Amplitude * (max_data / float(np.max(max_data)))
    n = Sleep_Time * samplerate
    Amp = []
    T = []
    i = 0
    while (i * n < N):
        Amp.append(Amplitude[int(i * n)])
        T.append(times[int(i * n)])
        i = i + 1

    Amp = np.array(Amp)
    T = np.array(T)

    return Amp, T
Example #11
def Speech(phrase):
    flag = Event()
    flag.set()
    A = "espeak -z -s 80 -w temp.wav "
    A = A + "'" + phrase + "'"
    os.system(A)
    samplerate, data = wavfile.read('temp.wav')
    times = np.arange(len(data)) / float(samplerate)
    max_data = maximum_filter1d(data, size=500)
    max_Amplitude = 10
    Amplitude = max_Amplitude * (max_data / float(np.max(max_data)))

    plt.figure(1)
    plt.plot(times, data)
    plt.plot(times, max_data, 'r')
    plt.show()
    plt.figure(2)
    plt.plot(times, Amplitude)
    plt.show()
    thread_movement = Thread(target=MoveLips, args=(times, Amplitude, flag))
    thread_talk = Thread(target=Talk, args=(phrase, flag))

    thread_talk.start()
    thread_movement.start()
    thread_talk.join()

    thread_movement.join()

    print(np.max(max_data))
Example #12
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    #  http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.iterate_structure.html#scipy.ndimage.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
    local_max = maximum_filter1d(arr2D,
                                 size=PEAK_NEIGHBORHOOD_SIZE * 2 + 1,
                                 axis=0)
    local_max = maximum_filter1d(local_max,
                                 size=PEAK_NEIGHBORHOOD_SIZE * 2 + 1,
                                 axis=1)
    local_max = local_max == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks (Fixed deprecated boolean operator by changing '-' to '^')
    detected_peaks = local_max ^ eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = filter(lambda x: x[2] > amp_min, peaks)  # freq, time, amp
    # get indices for frequency and time
    frequency_idx = []
    time_idx = []
    for x in peaks_filtered:
        frequency_idx.append(x[1])
        time_idx.append(x[0])

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return zip(frequency_idx, time_idx)
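The two chained 1-D maximum filters are equivalent to a single 2-D maximum filter with a square footprint, which is what makes this a local-maximum detector. A quick check:

import numpy as np
from scipy.ndimage import maximum_filter, maximum_filter1d

a = np.random.rand(32, 32)
sep = maximum_filter1d(maximum_filter1d(a, size=5, axis=0), size=5, axis=1)
print(np.array_equal(sep, maximum_filter(a, size=5)))  # True: the box max filter is separable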
Example #13
def validate_signal(t, y, t_ref, y_ref, num=1000, dx=20, dy=0.1):
    """ Validate a signal y(t) against a reference signal y_ref(t_ref) by creating a band
    around y_ref and finding the values in y outside the band

    Parameters:

        t       time of the signal
        y       values of the signal
        t_ref   time of the reference signal
        y_ref   values of the reference signal
        num     number of samples for the band
        dx      horizontal width of the band in samples
        dy      vertical distance of the band to y_ref

    Returns:

        t_band  time values of the band
        y_min   lower limit of the band
        y_max   upper limit of the band
        i_out   indices of the values in y outside the band
    """

    from scipy.ndimage.filters import maximum_filter1d, minimum_filter1d
    from scipy.interpolate import interp1d

    # re-sample the reference signal into a uniform grid
    t_band = np.linspace(start=t_ref[0], stop=t_ref[-1], num=num)

    # make t_ref strictly monotonic by adding epsilon to duplicate sample times
    for i in range(1, len(t_ref)):
        while t_ref[i - 1] >= t_ref[i]:
            t_ref[i] = t_ref[i] + 1e-13

    interp_method = 'linear' if y.dtype == np.float64 else 'zero'
    y_band = interp1d(x=t_ref, y=y_ref, kind=interp_method)(t_band)

    y_band_min = np.min(y_band)
    y_band_max = np.max(y_band)

    # calculate the width of the band
    if y_band_min == y_band_max:
        w = 0.5 if y_band_min == 0 else np.abs(y_band_min) * dy
    else:
        w = (y_band_max - y_band_min) * dy

    # calculate the lower and upper limits
    y_min = minimum_filter1d(input=y_band, size=dx) - w
    y_max = maximum_filter1d(input=y_band, size=dx) + w

    # find outliers
    y_min_i = np.interp(x=t, xp=t_band, fp=y_min)
    y_max_i = np.interp(x=t, xp=t_band, fp=y_max)
    i_out = np.logical_or(y < y_min_i, y > y_max_i)

    # do not count outliers outside the t_ref
    i_out = np.logical_and(i_out, t > t_band[0])
    i_out = np.logical_and(i_out, t < t_band[-1])

    return t_band, y_min, y_max, i_out
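A small usage sketch, assuming the function above and numpy are in scope (fresh copies are passed because the function nudges duplicate reference time stamps in place):

import numpy as np

t = np.linspace(0, 1, 500)
y_ref = np.sin(2 * np.pi * t)
y = y_ref + 0.01 * np.random.randn(500)
y[250] += 5.0  # inject one obvious outlier
t_band, y_min, y_max, i_out = validate_signal(t, y, t.copy(), y_ref.copy())
print(int(np.sum(i_out)))  # only the injected sample should fall outside the band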
Example #14
def get_mask(y, rpad=20, nmax=20):
    xp = y[1, :, :].flatten().astype('int32')
    yp = y[0, :, :].flatten().astype('int32')
    _, Ly, Lx = y.shape
    xm, ym = np.meshgrid(np.arange(Lx), np.arange(Ly))

    xedges = np.arange(-.5 - rpad, xm.shape[1] + .5 + rpad, 1)
    yedges = np.arange(-.5 - rpad, xm.shape[0] + .5 + rpad, 1)
    #xp = (xm-dx).flatten().astype('int32')
    #yp = (ym-dy).flatten().astype('int32')
    h, _, _ = np.histogram2d(xp, yp, bins=[xedges, yedges])

    hmax = maximum_filter1d(h, 5, axis=0)
    hmax = maximum_filter1d(hmax, 5, axis=1)

    yo, xo = np.nonzero(np.logical_and(h - hmax > -1e-6, h > 10))
    Nmax = h[yo, xo]
    isort = np.argsort(Nmax)[::-1]
    yo, xo = yo[isort], xo[isort]
    pix = []
    for t in range(len(yo)):
        pix.append([yo[t], xo[t]])

    for iter in range(5):
        for k in range(len(pix)):
            ye, xe = extendROI(pix[k][0], pix[k][1], h.shape[0], h.shape[1], 1)
            igood = h[ye, xe] > 2
            ye, xe = ye[igood], xe[igood]
            pix[k][0] = ye
            pix[k][1] = xe

    ibad = np.ones(len(pix), 'bool')
    for k in range(len(pix)):
        #print(pix[k][0].size)
        if pix[k][0].size < nmax:
            ibad[k] = 0

    #pix = [pix[k] for k in ibad.nonzero()[0]]

    M = np.zeros(h.shape)
    for k in range(len(pix)):
        M[pix[k][0], pix[k][1]] = 1 + k

    M0 = M[rpad + xp, rpad + yp]
    M0 = np.reshape(M0, xm.shape)
    return M0, pix
Example #15
def compute_sliding_minmax(array, Window, sig=2):
    if sig > 0:
        Flow = filters.gaussian_filter1d(array, sig)
    else:
        Flow = array
    Flow = filters.minimum_filter1d(Flow, Window, mode='wrap')
    Flow = filters.maximum_filter1d(Flow, Window, mode='wrap')
    return Flow
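A rolling minimum followed by a rolling maximum of the same window is a grayscale morphological opening: peaks narrower than Window are removed while broader structure survives. For instance:

import numpy as np
from scipy.ndimage import minimum_filter1d, maximum_filter1d

x = np.zeros(100)
x[50:53] = 1.0  # a 3-sample spike
opened = maximum_filter1d(minimum_filter1d(x, 10, mode='wrap'), 10, mode='wrap')
print(opened.max())  # 0.0: the narrow spike is suppressed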
Example #16
def filtermax(f, maxfiltsize=10):
    """Apply  maximum filter to the spectrum to ignore deeper fluxes of absorption lines."""
    # Maximum filter to ignore deeper fluxes of absorption lines
    f_maxfilt = maximum_filter1d(f, size=maxfiltsize)
    # Find points selected by maximum filter
    idxmax = np.array([i for i in range(len(f)) if f[i] - f_maxfilt[i] == 0.])

    return f_maxfilt, idxmax
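A usage sketch, assuming filtermax and its imports are in scope; samples that touch the upper envelope are continuum candidates, while the line core is excluded:

import numpy as np

flux = np.ones(20)
flux[8:11] = 0.3  # an absorption line
f_maxfilt, idxmax = filtermax(flux)
print(idxmax)  # every index except 8, 9, 10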
Example #17
def center_baseline(V, sigma=100, window=500):
    ''' centers V so the baseline is at 0 '''
    Flow = filters.gaussian_filter(V.T, [0.,sigma])
    Flow = filters.minimum_filter1d(Flow, window)
    Flow = filters.maximum_filter1d(Flow, window)
    V_centered = (V.T - Flow).T
    #V_centered = (V.T - Flow.mean(axis=1)[:,np.newaxis]).T
    return V_centered
Example #18
def find_reps(y, threshold, open_size, close_size):
    """
    From the Y profile of a barbell's path, determine the concentric phase of each rep.

    The algorithm is as follows:
        1. Compute the gradient (dy/dt) of the Y motion
        2. Binarize the gradient signal by a minimum threshold value to eliminate noise.
        3. Perform 1D opening by open_size using a minimum then maximum filter in series.
        4. Perform 1D closing by close_size using a maximum then minimum filter in series.

    The result is a step function that is true for every time point that the concentric (+Y) phase of the rep
    is being performed.

    Parameters
    ----------
    y : (N) array
        Y component of the motion of the barbell path.
    threshold : float
        Minimum acceptable value of the gradient (dY/dt) to indicate a rep.
        Increasing this can help eliminate noise, but may introduce a small delay between when a rep
        begins and when it is counted, therefore underestimating the time to complete a rep.
    open_size : int
        Minimum threshold of length of time that it takes to complete a rep (in frames).
        Increase this if there are false positive spikes in the rep step signal that are small in width.
    close_size : int
        Minimum length of time that could be between reps.
        Increase this if there are false breaks between reps that should be continuous.

    Returns
    -------
    (N) array
        Step signal representing when reps are performed. (1 indicates concentric phase of rep, 0 indicates no rep).
    """
    ygrad = np.gradient(y)
    rep_signal = np.where(ygrad > threshold, 1, 0)

    # Opening to remove spikes
    rep_signal = maximum_filter1d(minimum_filter1d(rep_signal, open_size),
                                  open_size)

    # Closing to connect movements (as in the step up from the jerk)
    rep_signal = minimum_filter1d(maximum_filter1d(rep_signal, close_size),
                                  close_size)

    return rep_signal
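A synthetic check, assuming find_reps and its scipy imports are in scope: one smooth upward movement should yield a single contiguous block of 1s.

import numpy as np

y = np.concatenate([np.zeros(20), np.linspace(0, 10, 30),
                    np.full(5, 10.0), np.linspace(10, 0, 30), np.zeros(20)])
reps = find_reps(y, threshold=0.1, open_size=5, close_size=5)
print(int(reps.sum()))  # width (in frames) of the detected concentric phase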
Example #19
def Synch_framestart(signal, L, spread=300, threshold=0.8):
    P = Synch_P(signal, L)
    R = Synch_R(signal, L)
    R = maximum_filter1d(R, spread)
    M = ((np.abs(P))**2) / (R**2)
    start, end = longest_block((M > threshold), 0)[:2]
    frame_start = int((start + end) / 2)
    freq_offset = np.mean(np.angle(P[start:end]))
    return frame_start, freq_offset, end
Example #20
def connectedRegion(mLam, rsmall, d0):
    mLam0 = np.zeros(rsmall.shape)
    mLam1 = np.zeros(rsmall.shape)
    # non-zero lam's
    mLam0[rsmall<=d0] = mLam>0
    mLam1[rsmall<=d0] = mLam

    mmax = mLam1.argmax()
    mask = np.zeros(rsmall.size)
    mask[mmax] = 1
    mask = np.resize(mask, (2*d0+1, 2*d0+1))

    for m in range(int(np.ceil(mask.shape[0]/2))):
        mask = filters.maximum_filter1d(mask, 3, axis=0) * mLam0
        mask = filters.maximum_filter1d(mask, 3, axis=1) * mLam0
        #mask = filters.maximum_filter(mask, footprint=rsmall<=1.5) * mLam0

    mLam *= mask[rsmall<=d0]
    return mLam
Example #21
 def smoothingInput(self):
     print(self.edge_det_window, self.smoothing_window, self.smoothing_var)
     depths = np.array(self.data_list[0][self.fileIndex])
     smoothing_filter = gaussian(self.smoothing_window, self.smoothing_var) / \
                        np.sum(gaussian(self.smoothing_window, self.smoothing_var))
     union_depths = maximum_filter1d(
         depths, self.edge_det_window)  ## MAX POOL to extract border lines
     final_depth = np.convolve(union_depths, smoothing_filter,
                               mode='same')  ## Smoothing border lines
     return final_depth
Example #22
def smooth_covars(cov, size):
    T = cov.shape[0]
    dX = cov.shape[1]
    traces = np.empty([T])
    for t in range(T):
        traces[t] = np.trace(cov[t])
    smoothed_traces = maximum_filter1d(traces, size)
    for t in range(T):
        if smoothed_traces[t] > traces[t]:
            cov[t] += np.eye(dX) * (smoothed_traces[t] - traces[t]) / dX
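A tiny check, assuming smooth_covars is in scope: the diagonals are inflated in place so that every trace reaches the rolling maximum of the traces.

import numpy as np

cov = np.stack([np.eye(2) * s for s in (1.0, 3.0, 1.0)])  # traces: 2, 6, 2
smooth_covars(cov, size=3)
print([float(np.trace(c)) for c in cov])  # [6.0, 6.0, 6.0]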
Example #23
def preprocess(F: np.ndarray,
               baseline: str,
               win_baseline: float,
               sig_baseline: float,
               fs: float,
               prctile_baseline: float = 0.9) -> np.ndarray:
    """ preprocesses fluorescence traces for spike deconvolution

    baseline-subtraction with window 'win_baseline'
    
    Parameters
    ----------------

    F : float, 2D array
        size [neurons x time], in pipeline uses neuropil-subtracted fluorescence

    baseline : str
        setting that describes how to compute the baseline of each trace

    win_baseline : float
        window (in seconds) for max filter

    sig_baseline : float
        width of Gaussian filter in seconds

    fs : float
        sampling rate per plane

    prctile_baseline : float
        percentile of trace to use as baseline if using `constant_prctile` for baseline
    
    Returns
    ----------------

    F : float, 2D array
        size [neurons x time], baseline-corrected fluorescence

    """
    win = int(win_baseline * fs)
    if baseline == 'maximin':
        Flow = filters.gaussian_filter(F, [0., sig_baseline])
        Flow = filters.minimum_filter1d(Flow, win)
        Flow = filters.maximum_filter1d(Flow, win)
    elif baseline == 'constant':
        Flow = filters.gaussian_filter(F, [0., sig_baseline])
        Flow = np.amin(Flow)
    elif baseline == 'constant_prctile':
        Flow = np.percentile(F, prctile_baseline, axis=1)
        Flow = np.expand_dims(Flow, axis=1)
    else:
        Flow = 0.

    F = F - Flow

    return F
Example #24
def _make_noise_gradient_color(grad):
    """
    Make a noise gradient color.

    TODO: Improve noise gradient quality.

    Example:

        Descriptor(b'Grdn'){
            'Nm  ': 'Custom\x00',
            'GrdF': (b'GrdF', b'ClNs'),
            'ShTr': False,
            'VctC': False,
            'ClrS': (b'ClrS', b'RGBC'),
            'RndS': 3650322,
            'Smth': 2048,
            'Mnm ': [0, 0, 0, 0],
            'Mxm ': [0, 100, 100, 100]
        }
    """
    from scipy.ndimage.filters import maximum_filter1d, uniform_filter1d
    logger.debug('Noise gradient is not accurate.')
    roughness = grad.get(Key.Smoothness).value / 4096.  # Larger is sharper.
    maximum = np.array([x.value for x in grad.get(Key.Maximum)],
                       dtype=np.float32)
    minimum = np.array([x.value for x in grad.get(Key.Minimum)],
                       dtype=np.float32)
    seed = grad.get(Key.RandomSeed).value
    rng = np.random.RandomState(seed)
    Y = rng.binomial(1, .5, (256, len(maximum))).astype(np.float32)
    size = max(1, int(roughness))
    Y = maximum_filter1d(Y, size, axis=0)
    Y = uniform_filter1d(Y, size * 64, axis=0)
    Y = Y / np.max(Y, axis=0)
    Y = ((maximum - minimum) * Y + minimum) / 100.
    X = np.linspace(0, 1, 256, dtype=np.float32)
    if grad.get(Key.ShowTransparency):
        G = interpolate.interp1d(X,
                                 Y[:, :-1],
                                 axis=0,
                                 bounds_error=False,
                                 fill_value=(Y[0, :-1], Y[-1, :-1]))
        Ga = interpolate.interp1d(X,
                                  Y[:, -1],
                                  axis=0,
                                  bounds_error=False,
                                  fill_value=(Y[0, -1], Y[-1, -1]))
    else:
        G = interpolate.interp1d(X,
                                 Y[:, :3],
                                 axis=0,
                                 bounds_error=False,
                                 fill_value=(Y[0, :3], Y[-1, :3]))
        Ga = None
    return G, Ga
Example #25
def validate_signal(t, y, t_ref, y_ref, num=1000, dx=20, dy=0.1):
    """ Validate a signal y(t) against a reference signal y_ref(t_ref) by creating a band
    around y_ref and finding the values in y outside the band

    Parameters:

        t       time of the signal
        y       values of the signal
        t_ref   time of the reference signal
        y_ref   values of the reference signal
        num     number of samples for the band
        dx      horizontal width of the band in samples
        dy      vertical distance of the band to y_ref

    Returns:

        t_band  time values of the band
        y_min   lower limit of the band
        y_max   upper limit of the band
        i_out   indices of the values in y outside the band
    """

    from scipy.ndimage.filters import maximum_filter1d, minimum_filter1d

    # re-sample the reference signal into a uniform grid
    t_band = np.linspace(start=t_ref[0], stop=t_ref[-1], num=num)

    # sort out the duplicate samples before the interpolation
    m = np.concatenate(([True], np.diff(t_ref) > 0))

    y_band = np.interp(x=t_band, xp=t_ref[m], fp=y_ref[m])

    y_band_min = np.min(y_band)
    y_band_max = np.max(y_band)

    # calculate the width of the band
    if y_band_min == y_band_max:
        w = 0.5 if y_band_min == 0 else np.abs(y_band_min) * dy
    else:
        w = (y_band_max - y_band_min) * dy

    # calculate the lower and upper limits
    y_min = minimum_filter1d(input=y_band, size=dx) - w
    y_max = maximum_filter1d(input=y_band, size=dx) + w

    # find outliers
    y_min_i = np.interp(x=t, xp=t_band, fp=y_min)
    y_max_i = np.interp(x=t, xp=t_band, fp=y_max)
    i_out = np.logical_or(y < y_min_i, y > y_max_i)

    # do not count outliers outside the t_ref
    i_out = np.logical_and(i_out, t > t_band[0])
    i_out = np.logical_and(i_out, t < t_band[-1])

    return t_band, y_min, y_max, i_out
Example #26
def preprocessFeatureMat(X, Lfilt, logX=False, logXblur=False, capXblur=True, **sink):
	# if not capXblur:
	# 	X = localMaxFilterSimMat(X)
	# X = localMaxFilterSimMat(X)

	# Lfilt *= 2 # TODO remove after test
	featureMeans = np.mean(X, axis=1).reshape((-1, 1))
	if logX and logXblur:
		X *= -np.log2(featureMeans) # variable encoding costs for rows
		Xblur = filterSimMat(X, Lfilt, 'hamming', scaleFilterMethod='max1')
	if logX and not logXblur:
		Xblur = filterSimMat(X, Lfilt, 'hamming', scaleFilterMethod='max1')
		X *= -np.log2(featureMeans)
	if not logX and logXblur:
		Xblur = filterSimMat(X, Lfilt, 'hamming', scaleFilterMethod='max1')
		Xblur *= np.log2(featureMeans)
	if not logX and not logXblur:
		Xblur = filterSimMat(X, Lfilt, 'hamming', scaleFilterMethod='max1')
		# Xblur = filterSimMat(X, Lfilt, 'flat', scaleFilterMethod='max1')

	if capXblur: # don't make long stretches also have large values
		maxima = filters.maximum_filter1d(Xblur, Lfilt // 2, axis=1, mode='constant')
		# maxima = filters.maximum_filter1d(Xblur, Lfilt, axis=1, mode='constant')
		Xblur[maxima > 0] /= maxima[maxima > 0]
		# print "preprocessFeatureMat(): max value in Xblur: ", np.max(Xblur) # 1.0
		# import sys
		# sys.exit()
		# Xblur = np.minimum(Xblur, 1.)

	# plt.figure()
	# viz.imshowBetter(X)
	# plt.figure()
	# viz.imshowBetter(Xblur)

	# have columns be adjacent in memory
	X = np.asfortranarray(X)
	Xblur = np.asfortranarray(Xblur)

	# assert(np.all((X > 0.) <= (Xblur > 0.)))

	assert(np.all(np.sum(X, axis=1) > 0))
	assert(np.all(np.sum(Xblur, axis=1) > 0))

	# if np.max(X) > 1.:
	# 	print "X min"

	print "preprocessFeatureMat(): X shape, logX, logXblur", X.shape, logX, logXblur
	print "preprocessFeatureMat(): Lfilt", Lfilt
	print "preprocessFeatureMat(): X min, max", X.min(), X.max()
	print "preprocessFeatureMat(): Xblur min, max", Xblur.min(), Xblur.max()

	# import sys
	# sys.exit()

	return X, Xblur
Example #27
def filter1d_same(a: np.ndarray, W: int, max_or_min: str, fillna=np.nan):
    out_dtype = np.full(0, fillna).dtype
    hW = (W - 1) // 2  # Half window size
    if max_or_min == 'max':
        out = maximum_filter1d(a, size=W, origin=hW)
    else:
        out = minimum_filter1d(a, size=W, origin=hW)
    if out.dtype is out_dtype:
        out[:W - 1] = fillna
    else:
        out = np.concatenate((np.full(W - 1, fillna), out[W - 1:]))
    return out
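A quick check of the alignment, assuming the function above is in scope: origin=hW turns the centered filter into a trailing window that ends at the current sample, and the first W - 1 outputs are marked invalid.

import numpy as np

a = np.array([3., 1., 4., 1., 5., 9., 2., 6.])
print(filter1d_same(a, 3, 'max'))  # [nan nan  4.  4.  5.  9.  9.  9.]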
Example #28
def process_cut(spect, stddevs=3, ignore=2):
    from scipy.ndimage.filters import maximum_filter1d
    # "loudness" curve
    loud = np.log(np.sum(np.exp(spect), axis=1))
    # normalize
    nloud = (loud - np.mean(loud)) / np.std(loud)
    lowloud = nloud < -stddevs
    # check whether we stay within the stddev threshold; ignore clicks inside (up to 2 frames long)
    fltloud = ~maximum_filter1d(lowloud, ignore + 1)
    where_ok = np.where(fltloud)[0]
    cut_front = np.min(where_ok)
    cut_back = np.max(where_ok)
    return cut_front, cut_back + 1
Example #29
    def _gradient_nms(self, accumulated_grad, win_size=3):
        '''
        Non-maximum suppression to find local maxima of the accumulated gradient.

        @accumulated_grad:
            np.array, input accumulated gradient.
        @win_size:
            int, sliding window length; controls how many neighboring values are compared.
        @return:
            list, indices of local maxima of the accumulated gradient.
        '''
        indexs = []
        maximas = maximum_filter1d(accumulated_grad, win_size)
        for i in range(len(accumulated_grad)):
            if accumulated_grad[i] > 0 and abs(accumulated_grad[i] -
                                               maximas[i]) < 1e-6:
                indexs.append(i)
        return indexs
Example #30
def preprocess(F, ops):
    sig = ops['sig_baseline']
    win = int(ops['win_baseline']*ops['fs'])
    if ops['baseline']=='maximin':
        Flow = filters.gaussian_filter(F,    [0., sig])
        Flow = filters.minimum_filter1d(Flow,    win)
        Flow = filters.maximum_filter1d(Flow,    win)
    elif ops['baseline']=='constant':
        Flow = filters.gaussian_filter(F,    [0., sig])
        Flow = np.amin(Flow)
    elif ops['baseline']=='constant_prctile':
        Flow = np.percentile(F, ops['prctile_baseline'], axis=1)
        Flow = np.expand_dims(Flow, axis = 1)
    else:
        Flow = 0.

    F = F - Flow

    return F
Example #31
def generate_Lip_Tragectory(phrase):
    A = "espeak -z -s 80 -v female5 -w test.wav "
    A = A + "'" + phrase + "'"
    #os.system("espeak -z -s 80 -v female5 -w test.wav 'Hey, why no one is looking at me? I feel neglected. I feel it! I am afraid!' ")
    os.system(A)
    samplerate, data = wavfile.read('test.wav')
    dt = 1 / float(samplerate)
    times = np.arange(len(data)) / float(samplerate)
    N = len(times)
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10
    Amplitude = np.round(max_Amplitude * (max_data / np.max(max_data)))
    Extrema = argrelextrema(max_data, np.less_equal, order=100)[0]
    Amplitude = Amplitude[Extrema]
    Sleep_Time = []
    for i in range(len(Extrema)):
        if (i + 1 < len(Extrema)):
            Sleep_Time.append(dt * (times[Extrema[i + 1]] - times[Extrema[i]]))

    return np.array(Amplitude), np.array(Sleep_Time), Extrema
Example #32
def preprocess(F, ops):
    """ preprocesses fluorescence traces for spike deconvolution

    baseline-subtraction with window 'win_baseline'
    
    Parameters
    ----------------

    F : float, 2D array
        size [neurons x time], in pipeline uses neuropil-subtracted fluorescence

    ops : dictionary
        'baseline', 'win_baseline', 'sig_baseline', 'fs',
        (optional 'prctile_baseline' needed if ops['baseline']=='constant_prctile')
    
    Returns
    ----------------

    F : float, 2D array
        size [neurons x time], baseline-corrected fluorescence

    """
    sig = ops['sig_baseline']
    win = int(ops['win_baseline'] * ops['fs'])
    if ops['baseline'] == 'maximin':
        Flow = filters.gaussian_filter(F, [0., sig])
        Flow = filters.minimum_filter1d(Flow, win)
        Flow = filters.maximum_filter1d(Flow, win)
    elif ops['baseline'] == 'constant':
        Flow = filters.gaussian_filter(F, [0., sig])
        Flow = np.amin(Flow)
    elif ops['baseline'] == 'constant_prctile':
        Flow = np.percentile(F, ops['prctile_baseline'], axis=1)
        Flow = np.expand_dims(Flow, axis=1)
    else:
        Flow = 0.

    F = F - Flow

    return F
Example #33
  # Compute errors.
  max_errors = []
  av_errors = []

  for color in range(0, 3):
    max_error = np.zeros(6)
    av_error = np.zeros(6)

    for c in range(0, num_checkers):
      # Get min. absolute diff to envelopes.
      env_diff = np.minimum(np.fabs(checker_int[color][c, :] - envelopes[color][:, 0]),
                            np.fabs(checker_int[color][c, :] - envelopes[color][:, 2]))

      # Reject values within the vicinity of the envelopes.
      reject_mask = img_filters.maximum_filter1d(np.int_(env_diff <= 0.02), 7)

      if np.min(reject_mask) != 1:
         int_vals = checker_int[color][c, :]
         int_vals = int_vals[reject_mask != 1]
         median = np.median(int_vals)
         max_error[c] = np.max(np.fabs(int_vals - median))
         av_error[c] = np.std(int_vals)

    max_errors.append(max_error)
    av_errors.append(av_error)

  color_name = [ 'red', 'green', 'blue' ]

  print "Average error: "
  for color in range(0, 3):
Example #34
    def detect(self, threshold, combine=0.03, pre_avg=0.15, pre_max=0.01,
               post_avg=0, post_max=0.05, delay=0):
        """
        Detects the onsets.

        :param threshold: threshold for peak-picking
        :param combine:   only report 1 onset for N seconds
        :param pre_avg:   use N seconds past information for moving average
        :param pre_max:   use N seconds past information for moving maximum
        :param post_avg:  use N seconds future information for moving average
        :param post_max:  use N seconds future information for moving maximum
        :param delay:     report the onset N seconds delayed

        In online mode, post_avg and post_max are set to 0.

        Implements the peak-picking method described in:

        "Evaluating the Online Capabilities of Onset Detection Methods"
        Sebastian Böck, Florian Krebs and Markus Schedl
        Proceedings of the 13th International Society for Music Information
        Retrieval Conference (ISMIR), 2012

        """
        # online mode?
        if self.online:
            post_max = 0
            post_avg = 0
        # convert timing information to frames
        pre_avg = int(round(self.fps * pre_avg))
        pre_max = int(round(self.fps * pre_max))
        post_max = int(round(self.fps * post_max))
        post_avg = int(round(self.fps * post_avg))
        # `combine` and `delay` are already given in seconds, so no conversion is needed
        # init detections
        self.detections = []
        # moving maximum
        max_length = pre_max + post_max + 1
        max_origin = int(np.floor((pre_max - post_max) / 2))
        mov_max = maximum_filter1d(self.activations, max_length,
                                   mode='constant', origin=max_origin)
        # moving average
        avg_length = pre_avg + post_avg + 1
        avg_origin = int(np.floor((pre_avg - post_avg) / 2))
        mov_avg = uniform_filter1d(self.activations, avg_length,
                                   mode='constant', origin=avg_origin)
        # detections are activations equal to the moving maximum
        detections = self.activations * (self.activations == mov_max)
        # detections must be greater than or equal to the moving average + threshold
        detections *= (detections >= mov_avg + threshold)
        # convert detected onsets to a list of timestamps
        detections = np.nonzero(detections)[0].astype(float) / self.fps
        # shift if necessary
        if delay != 0:
            detections += delay
        # always use the first detection and all others if none was reported
        # within the last `combine` seconds
        if detections.size > 1:
            # filter all detections which occur within `combine` seconds
            combined_detections = detections[1:][np.diff(detections) > combine]
            # add them after the first detection
            self.detections = np.append(detections[0], combined_detections)
        else:
            self.detections = detections