Example #1
def move_nanmax_filter(arr, window, axis=-1):
    "Moving window maximium ignoring NaNs, implemented with a filter."
    global maximum_filter1d, convolve1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if convolve1d is None:
        try:
            from scipy.ndimage import convolve1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nrr = np.isnan(arr)
    arr[nrr] = -np.inf
    x0 = (window - 1) // 2
    maximum_filter1d(arr, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=arr)
    w = np.ones(window, dtype=int)
    nrr = nrr.astype(int)
    x0 = (1 - window) // 2
    convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
               output=nrr)
    arr[nrr == window] = np.nan
    return arr
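
A minimal usage sketch (assumption: the function lives in a module that initializes `maximum_filter1d` and `convolve1d` to None for the lazy SciPy import, as the `global` statements suggest):

import numpy as np

maximum_filter1d = None  # module-level placeholders for the lazy-import pattern
convolve1d = None

a = np.array([1.0, np.nan, 3.0, 2.0, np.nan])
print(move_nanmax_filter(a, window=2))
# likely output: [nan  1.  3.  3.  2.] -- the incomplete leading window becomes
# NaN via the cval=np.nan trick, while interior NaNs are skipped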
Example #2
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
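
The per-axis mode feature being tested can be exercised directly; a small sketch assuming SciPy is installed:

import numpy as np
from scipy import ndimage as sndi

arr = np.random.rand(4, 5)
seq = sndi.gaussian_filter1d(arr, 1, axis=0, mode='reflect')
seq = sndi.gaussian_filter1d(seq, 1, axis=1, mode='wrap')
combined = sndi.gaussian_filter(arr, 1, mode=['reflect', 'wrap'])
assert np.allclose(seq, combined)  # one mode per axis == sequential 1D passes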
Example #3
def test_minmaximum_filter1d():
    # Regression gh-3898
    in_ = np.arange(10)
    out = sndi.minimum_filter1d(in_, 1)
    assert_equal(in_, out)
    out = sndi.maximum_filter1d(in_, 1)
    assert_equal(in_, out)
    # Test reflect
    out = sndi.minimum_filter1d(in_, 5, mode='reflect')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='reflect')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test constant
    out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
    out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
    # Test nearest
    out = sndi.minimum_filter1d(in_, 5, mode='nearest')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='nearest')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test wrap
    out = sndi.minimum_filter1d(in_, 5, mode='wrap')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
    out = sndi.maximum_filter1d(in_, 5, mode='wrap')
    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
Example #4
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
Example #5
def move_max_filter(arr, window, axis=-1):
    "Moving window maximium implemented with a filter."
    arr = np.asarray(arr)  # np.array(arr, copy=False) raises on NumPy 2 when a copy is needed
    global maximum_filter1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis == None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    y = arr.astype(float)
    x0 = (window - 1) // 2
    maximum_filter1d(y,
                     window,
                     axis=axis,
                     mode='constant',
                     cval=np.nan,
                     origin=x0,
                     output=y)
    return y
Example #6
def test_minmaximum_filter1d():
    # Regression gh-3898
    in_ = np.arange(10)
    out = sndi.minimum_filter1d(in_, 1)
    assert_equal(in_, out)
    out = sndi.maximum_filter1d(in_, 1)
    assert_equal(in_, out)
    # Test reflect
    out = sndi.minimum_filter1d(in_, 5, mode='reflect')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='reflect')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test constant
    out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
    out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
    # Test nearest
    out = sndi.minimum_filter1d(in_, 5, mode='nearest')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='nearest')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test wrap
    out = sndi.minimum_filter1d(in_, 5, mode='wrap')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
    out = sndi.maximum_filter1d(in_, 5, mode='wrap')
    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
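
The expected values above follow directly from the boundary modes; for instance:

import numpy as np
from scipy import ndimage as sndi

x = np.arange(10)
print(sndi.maximum_filter1d(x, 5, mode='wrap'))
# [9 9 4 5 6 7 8 9 9 9] -- windows near the edges wrap around to the other end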
Example #7
def merge_channels(data, sampling_rate, filter_data: bool = True):
    """Merge channels based on a running maximum.

    Args:
        data (ndarray): [samples, channels]
        sampling_rate (num): in Hz

    Returns:
        ndarray: merged across
    """
    data = np.array(
        data
    )  # ensure data is an np.array (and not dask) - otherwise np.interp will fail
    mask = ~np.isfinite(data)  # remove all nan/inf data
    data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask),
                           data[~mask])
    # band-pass filter out noise on each channel
    b, a = scipy.signal.butter(6, (25, 1500),
                               btype='bandpass',
                               fs=sampling_rate)
    data = scipy.signal.filtfilt(b, a, data, axis=0, method='pad')
    # find loudest channel in 101-sample windows
    if filter_data:
        sng_max = maximum_filter1d(np.abs(data), size=101, axis=0)
    else:
        sng_max = np.abs(data)
    loudest_channel = np.argmax(sng_max, axis=-1)
    # get linear index and merge channels
    idx = np.ravel_multi_index((np.arange(sng_max.shape[0]), loudest_channel),
                               data.shape)
    data_merged_max = data.ravel()[idx]
    data_merged_max = data_merged_max[:, np.newaxis]  # shape needs to be [nb_samples, 1]
    return data_merged_max
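
The pick-the-loudest-channel trick in isolation, as a sketch on toy data (window of 3 instead of 101):

import numpy as np
from scipy.ndimage import maximum_filter1d

data = np.array([[0., 5.], [1., 0.], [3., 0.], [0., 2.], [0., 4.], [1., 0.]])
env = maximum_filter1d(np.abs(data), size=3, axis=0)  # running-max envelope per channel
loudest = np.argmax(env, axis=-1)                     # loudest channel at each sample
idx = np.ravel_multi_index((np.arange(data.shape[0]), loudest), data.shape)
print(data.ravel()[idx])                              # that channel's sample, merged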
Example #8
def extract_audio_features(filename, peaks, freq_range, thresh, downsample):
    """
    Load some audio data; for each peak, band-pass filter within
    freq_range around it and threshold to 0/1 at thresh.
    :param filename: audio file readable by soundfile
    :param peaks: center frequencies to extract
    :param freq_range: half-width of each band-pass filter
    :param thresh: binarization threshold
    :param downsample: downsampling factor applied to each binary series
    :return: dict of binary series per peak frequency, plus 'audio_length' and 'sr'
    """
    audio_samples, sr = sf.read(filename)
    peak_series = {}
    for freq in peaks:
        filtered_audio = butter_bandpass_filter(audio_samples,
                                                freq - freq_range,
                                                freq + freq_range, sr)
        high_values = filtered_audio > thresh
        low_values = filtered_audio <= thresh
        filtered_audio[high_values] = 1
        filtered_audio[low_values] = 0
        filtered_audio = filtered_audio[::int(downsample / 10)]
        filtered_audio = maximum_filter1d(filtered_audio, size=5)
        filtered_audio = filtered_audio[::10]
        if freq == 12000:
            neural_start = np.argwhere(filtered_audio == 1)[0][0]
            neural_end = np.argwhere(filtered_audio == 1)[-1][0]
            filtered_audio[neural_start:neural_end] = 1
        peak_series[freq] = np.array(filtered_audio)
    peak_series['audio_length'] = len(audio_samples) / float(sr)
    peak_series['sr'] = sr
    return peak_series
Example #9
def find_nearest_object(mmp, im_labeled, slice_map, i, labels_center_column):
    """
    mmp : mask
    im_labeled : label
    i : object to be connected
    labels_center_column : known objects
    """
    thre = 40 # threshold # of pixels (in y-direction) to detect adjacent object
    steps = [5, 10, 20, 40, 80]

    sl_y, sl_x = slice_map[i]

    # right side
    ss = im_labeled[:,sl_x.stop-3:sl_x.stop].max(axis=1)
    ss_msk = ni.maximum_filter1d(ss == i, thre)

    if sl_x.stop < 2048/2.:
        sl_x0 = sl_x.stop
        sl_x_pos = [sl_x.stop + s for s in steps]
    else:
        sl_x0 = sl_x.start
        sl_x_pos = [sl_x.start - s for s in steps]

    for pos in sl_x_pos:
        ss1 = im_labeled[:,pos]
        detected_ob = set(np.unique(ss1[ss_msk])) - set([0])
        for ob_id in detected_ob:
            if ob_id in labels_center_column:
                sl = slice_map[ob_id][1]
                if sl.start < sl_x0 < sl.stop:
                    continue
                else:
                    return ob_id
Example #10
def maxNx1d(src, lengths):
    """ND maximum filter using 1D kernels

    """
    for d in range(len(lengths)):
        L = src.shape[d]
        kernel_width = lengths[d]
        if (kernel_width % 2) != 1:
            raise NotImplementedError('maxNx1d on even-length %d kernel' % lengths[d])
        kernel_radius = kernel_width//2

        if kernel_radius < 1:
            print("warning: dimension %d kernel %d is too small, has no effect" % (d, kernel_width))
            continue
        elif kernel_width > L:
            raise ValueError("dimension %d length %d too small for kernel %d" % (d, L, kernel_width))

        src = ndimage.maximum_filter1d(
            src,
            lengths[d],
            mode='constant',
            axis=d
        )

        # trim off invalid borders (kernel_radius >= 1 is guaranteed here)
        src = src[tuple([slice(None)] * d
                        + [slice(kernel_radius, -kernel_radius)]
                        + [Ellipsis])]

    return src
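
A quick usage sketch: each filtered axis loses one kernel radius from each border.

import numpy as np
from scipy import ndimage

img = np.arange(25.).reshape(5, 5)
print(maxNx1d(img, (3, 3)).shape)  # (3, 3): 5 - 2 * 1 along each axis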
Example #11
def find_nearest_object(mmp, im_labeled, slice_map, i, labels_center_column):
    """
    mmp : mask
    im_labeled : label
    i : object to be connected
    labels_center_column : known objects
    """
    thre = 40  # threshold # of pixels (in y-direction) to detect adjacent object
    steps = [5, 10, 20, 40, 80]

    sl_y, sl_x = slice_map[i]

    # right side
    ss = im_labeled[:, sl_x.stop - 3:sl_x.stop].max(axis=1)
    ss_msk = ni.maximum_filter1d(ss == i, thre)

    if sl_x.stop < 2048 / 2.:
        sl_x0 = sl_x.stop
        sl_x_pos = [sl_x.stop + s for s in steps]
    else:
        sl_x0 = sl_x.start
        sl_x_pos = [sl_x.start - s for s in steps]

    for pos in sl_x_pos:
        ss1 = im_labeled[:, pos]
        detected_ob = set(np.unique(ss1[ss_msk])) - set([0])
        for ob_id in detected_ob:
            if ob_id in labels_center_column:
                sl = slice_map[ob_id][1]
                if sl.start < sl_x0 < sl.stop:
                    continue
                else:
                    return ob_id
Example #12
def plot_max_min(rewards, labels, sigma, save=False, filename=''):
    '''
    Like plot_iterations, but plots max and min instead of stds
    '''
    plt.figure(figsize=figure_size)
    ax = plt.gca()
    for i in range(len(rewards)):
        rewards[i] = rewards[i].squeeze()
        color = next(ax._get_lines.prop_cycler)['color']
        rewards_mean = np.mean(rewards[i], axis=(0, 2))
        rewards_mean = gaussian_filter(rewards_mean, sigma=sigma)
        rewards_min = np.min(rewards[i], axis=(0, 2))
        rewards_min = minimum_filter1d(rewards_min, sigma)

        rewards_max = np.max(rewards[i], axis=(0, 2))
        rewards_max = maximum_filter1d(rewards_max, sigma)
        plt.plot(rewards_mean, color=color, label=labels[i])
        plt.fill_between(range(rewards_mean.size),
                         rewards_min,
                         rewards_max,
                         alpha=0.3)
        # plt.plot(gaussian_filter(rewards_min, 100), color = color, linestyle = 'dashed')
        # plt.plot(gaussian_filter(rewards_max, 100), color = color, linestyle = 'dashed')
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.legend()
    if save:
        plt.savefig(filename)
    plt.show()
Example #13
    def smoothing(self, data, sigma):
        """
        Smooth out a 1D dataset with a running min/max (morphological) filter

        Parameters
        ----------
        data: 2D array
            x-y coordinates to be smoothed
        sigma: float
            base window size for the min/max filters (the gaussian variant
            is commented out below)

        Returns
        -------
        dnew: 2D array
            smoothed x-y coordinates
        """
        x, y = data
        x = ndimage.minimum_filter1d(x, sigma)
        x = ndimage.maximum_filter1d(x, 4 * sigma)
        # filtered = ndimage.gaussian_filter1d(x, sigma, mode='reflect')
        #filtered[:2 * sigma] = x[:2 * sigma]
        #filtered[-2 * sigma:] = x[-2 * sigma:]
        dnew = x, y

        plot = False
        if plot:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            # ax.plot(filtered, lw=0.5)
            #ax.plot(x, lw=0.5)
            # ax.set_xlim([900, 2500])
            fig.savefig('smooting_debug.png', dpi=300)
        return dnew
Example #14
def get_mask(y, rpad=20, nmax=20):
    xp = y[1,:,:].flatten().astype('int32')
    yp = y[0,:,:].flatten().astype('int32')
    _, Ly, Lx = y.shape
    xm, ym = np.meshgrid(np.arange(Lx),  np.arange(Ly))

    xedges = np.arange(-.5-rpad, xm.shape[1]+.5+rpad, 1)
    yedges = np.arange(-.5-rpad, xm.shape[0]+.5+rpad, 1)
    #xp = (xm-dx).flatten().astype('int32')
    #yp = (ym-dy).flatten().astype('int32')
    h,_,_ = np.histogram2d(xp, yp, bins=[xedges, yedges])

    hmax = maximum_filter1d(h, 5, axis=0)
    hmax = maximum_filter1d(hmax, 5, axis=1)

    yo, xo = np.nonzero(np.logical_and(h-hmax>-1e-6, h>10))
    Nmax = h[yo, xo]
    isort = np.argsort(Nmax)[::-1]
    yo, xo = yo[isort], xo[isort]
    pix = []
    for t in range(len(yo)):
        pix.append([yo[t],xo[t]])

    for iter in range(5):
        for k in range(len(pix)):
            ye, xe = extendROI(pix[k][0], pix[k][1], h.shape[0], h.shape[1], 1)
            igood = h[ye, xe]>2
            ye, xe = ye[igood], xe[igood]
            pix[k][0] = ye
            pix[k][1] = xe

    ibad = np.ones(len(pix), 'bool')
    for k in range(len(pix)):
        if pix[k][0].size<nmax:
            ibad[k] = 0

    #pix = [pix[k] for k in ibad.nonzero()[0]]

    M = np.zeros(h.shape)
    for k in range(len(pix)):
        M[pix[k][0],    pix[k][1]] = 1+k

    M0 = M[rpad + xp, rpad + yp]
    M0 = np.reshape(M0, xm.shape)
    return M0, pix
Example #15
def _envelope(y, rate, threshold):
    mask = []
    y_mean = maximum_filter1d(np.abs(y), mode="constant", size=rate//20)  # running max over a 1/20 s window
    for mean in y_mean:
        if mean > threshold:
            mask.append(True)
        else:
            mask.append(False)
    return mask, y_mean
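
A usage sketch, assuming `_envelope` and the `maximum_filter1d` import share a module:

import numpy as np
from scipy.ndimage import maximum_filter1d

rate = 100                                 # window is rate // 20 = 5 samples
t = np.linspace(0, 1, rate)
y = np.sin(2 * np.pi * 5 * t) * (t > 0.5)  # silence, then a tone
mask, env = _envelope(y, rate, threshold=0.1)
print(sum(mask), "of", len(mask), "samples kept")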
Example #16
def plot(re,gt,vmax,vmin):
    
    Plot_data=pd.DataFrame(re,columns=['DP','BD','E','FS','A','RC'])
    #max filter data
    filtered_data=Plot_data.copy()
    for index, row in filtered_data.items():
        filtered_data[index] = maximum_filter1d(filtered_data[index], mode='nearest', size=3)
        
    def plot_predict_result(df):
        y=[x for x in range(1,7)]
        ax.plot(y, gt, label='ground truth',color='green')
        ax.plot(y, np.mean(df,axis=0), label='mean predict',color='blue')
#         ax.plot(y, np.median(df,axis=0), label='median predict',color='red')
        ax.plot(y, np.round(np.mean(df,axis=0)), label='mean round predict',color='black')
        plt.xticks(y)
        plt.yticks(y_range)
        labels=['DP','BD','E','FS','A','RC']
        ax.set_xticklabels(labels)
        ax.legend()    
        it=1
        for types in ['DP','BD','E','FS','A','RC']:
            Range=vmax[types]-vmin[types]
            rect = Rectangle((it, vmin[types]), 0.2, Range, color='green')
            ax.add_patch(rect)   
            it=it+1
    
    x_range = range(0,len(Plot_data),1)
    y_range = np.linspace(1,5,num=9)    
        
    # raw data    
    ax=plt.figure(figsize=(20,20))
    ax = plt.subplot(421)
    Plot_data.plot(ax=ax)
    ax.legend(['DP','BD','E','FS','A','RC'],bbox_to_anchor=(1,1))
    plt.ylabel('Gear Score')
    plt.xlabel('Video clips length')
    

    plt.xticks(x_range)
    plt.yticks(y_range)

    # filtered data   
    ax = plt.subplot(422)
    filtered_data.plot(ax=ax)
    ax.legend(['DP','BD','E','FS','A','RC'],bbox_to_anchor=(1,1))
    plt.ylabel('Gear Score')
    plt.xlabel('Video clips length')
    plt.xticks(x_range)
    plt.yticks(y_range)
    
    
    ax = plt.subplot(423)
    plot_predict_result(Plot_data)

    ax = plt.subplot(424)
    plot_predict_result(filtered_data)
Example #17
def move_nanmax_filter(arr, window, axis=-1):
    "Moving window maximium ignoring NaNs, implemented with a filter."
    arr = np.asarray(arr)  # np.array(arr, copy=False) raises on NumPy 2 when a copy is needed
    global maximum_filter1d, convolve1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if convolve1d is None:
        try:
            from scipy.ndimage import convolve1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis == None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nrr = np.isnan(arr)
    arr[nrr] = -np.inf
    x0 = (window - 1) // 2
    maximum_filter1d(arr,
                     window,
                     axis=axis,
                     mode='constant',
                     cval=np.nan,
                     origin=x0,
                     output=arr)
    w = np.ones(window, dtype=int)
    nrr = nrr.astype(int)
    x0 = (1 - window) // 2
    convolve1d(nrr,
               w,
               axis=axis,
               mode='constant',
               cval=0,
               origin=x0,
               output=nrr)
    arr[nrr == window] = np.nan
    return arr
Example #18
def move_max_filter(arr, window, axis=-1):
    "Moving window maximium implemented with a filter."
    global maximum_filter1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    y = arr.astype(float)
    x0 = (window - 1) // 2
    maximum_filter1d(y, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=y)
    return y
Example #19
def segmentation_1d(signal, size=10):
    absolute_val = np.abs(signal)
    maxs = ndi.maximum_filter1d(absolute_val, size)
    zeros = np.zeros(len(maxs))
    if np.all(zeros == maxs):
        return zeros
    else:
        thresh = filters.threshold_otsu(maxs)
        binary = maxs > thresh
    return binary
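
A sketch of calling it, assuming the module binds `scipy.ndimage` as `ndi` and `skimage.filters` as `filters`:

import numpy as np
from scipy import ndimage as ndi
from skimage import filters

sig = np.concatenate([np.zeros(50), np.sin(np.linspace(0., 20., 100)), np.zeros(50)])
seg = segmentation_1d(sig, size=10)
print(int(seg.sum()), "samples flagged as active")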
Example #20
def average_absolute_discrete_group_delay(mat_phase):
#    print(mat_phase)
    dgd = discrete_group_delay(mat_phase)
    
    temp = ndimage.maximum_filter1d(numpy.abs(dgd), 5, axis=0, output=None, mode="constant", cval=0.0, origin=2)
#    temp = bn.move_max(numpy.abs(dgd), 5, axis=0)
#    temp = maxfilt.maxfilt1d(numpy.abs(dgd), 5) # my implementation
#    print(temp)
#    scipy.io.savemat('e:\\dropbox\\applesoup\\knietrommler\\v2\\python_export_dgd.mat', {'dgd':dgd, 'maxfilt_output':temp})
    return numpy.mean(temp, axis=0)
Example #21
def local_extrema_2(arr, min_distance=20):
    """Find all local minima and maxima of the array, separated by at least min_distance."""
    mode = 'constant'
    cval = arr.max() + 1
    max_points = arr == maximum_filter1d(arr, min_distance, mode=mode, cval=cval)
    min_points = arr == minimum_filter1d(arr, min_distance, mode=mode, cval=cval)

    return [arr[np.nonzero(min_points)[0]], np.nonzero(min_points)[0],
            arr[np.nonzero(max_points)[0]], np.nonzero(max_points)[0]]
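
A sketch of calling it on hypothetical data (values and indices come back paired):

import numpy as np
from scipy.ndimage import maximum_filter1d, minimum_filter1d

x = np.array([0., 2., 1., 0., 3., 1., 0.5, 4., 0.])
min_vals, min_idx, max_vals, max_idx = local_extrema_2(x, min_distance=3)
print(max_idx)  # e.g. [1 4 7]: peaks at least 3 samples apart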
Example #22
def get_localmaxs(array, count=3, window=25):
    """
    array must be one-dimensional
    Individualize the locals maxs relatives to a window of the given length.
    Returns theirs coodenates.
    """
    maxs = maximum_filter1d(array, window)
    maxs[maxs > array] = array.min()
    graph(array, maxs)
    top_positions = maxs.argsort()[::-1][:count]
    return sorted(top_positions)
Example #23
def get_localmaxs(array, count=3, window=25):
    """
    array must be one-dimensional
    Individualize the locals maxs relatives to a window of the given length.
    Returns theirs coodenates.
    """
    maxs = maximum_filter1d(array, window)
    maxs[maxs > array] = array.min()
    graph(array, maxs)
    top_positions = maxs.argsort()[::-1][:count]
    return sorted(top_positions)
Example #24
def get_clips(series, threshold, warmup=30, cooldown=30):
    maxed_series = maximum_filter1d(series,
                                    warmup + cooldown + 1,
                                    origin=(-warmup + cooldown) // 2)
    moving_frames = (maxed_series > threshold).astype('float')
    moving_frames[0] = 0
    moving_frames[-1] = 0
    on_off = np.diff(moving_frames)
    starts = np.where(on_off == 1)[0]
    stops = np.where(on_off == -1)[0]

    return zip(starts, stops)
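
A sketch of the detection on synthetic data:

import numpy as np
from scipy.ndimage import maximum_filter1d

motion = np.zeros(200)
motion[80:120] = 1.0  # one hypothetical burst of motion
print(list(get_clips(motion, threshold=0.5, warmup=10, cooldown=10)))
# one clip around (69, 129): the burst padded by the warmup/cooldown window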
Example #25
    def blind_solve(image, trace_half_height=5):
        # Find the peaks of each of the traces using a max filter
        peaks = ndimage.maximum_filter1d(image.data.data,
                                         size=MIN_TRACE_SEPARATION,
                                         axis=0)
        significant = (image.data.data /
                       image.uncertainty) > SIGNAL_TO_NOISE_TRACING_CUTOFF
        # ignore pixels in the bpm
        significant = np.logical_and(significant, image.mask == 0)
        # identify the traces.
        binary_map = np.logical_and(peaks == image.data.data, significant)

        # Dilate the label map to make sure all traces are connected
        binary_map = ndimage.morphology.binary_dilation(binary_map)
        labeled_image, n_labels = ndimage.label(binary_map)
        X, Y = np.meshgrid(np.arange(image.shape[1], dtype=int),
                           np.arange(image.shape[0], dtype=int))
        labeled_indices = np.arange(1, n_labels + 1)

        # Find the widths of the traces by finding the min and max x position
        x_maxes = ndimage.labeled_comprehension(X, labeled_image,
                                                labeled_indices, np.max, float,
                                                None).astype(int)
        x_mins = ndimage.labeled_comprehension(X, labeled_image,
                                               labeled_indices, np.min, float,
                                               None).astype(int)
        # Pick out only features that are wide like traces and span the center
        # Note labeled_indices is one indexed
        trace_xextents_ok = np.logical_and(
            x_maxes > (image.shape[1] // 2 + MIN_TRACE_HALF_WIDTH), x_mins <
            (image.shape[1] // 2 - MIN_TRACE_HALF_WIDTH))
        # and remove any traces whose centers are close (by a half width) to the top or bottom of the detector
        y_maxes = ndimage.labeled_comprehension(Y, labeled_image,
                                                labeled_indices, np.max, float,
                                                None).astype(int)
        y_mins = ndimage.labeled_comprehension(Y, labeled_image,
                                               labeled_indices, np.min, float,
                                               None).astype(int)
        trace_centers_not_near_edge = np.logical_and(
            y_maxes < (image.shape[0] - trace_half_height),
            y_mins > trace_half_height)
        true_labels = labeled_indices[np.logical_and(
            trace_xextents_ok, trace_centers_not_near_edge)]
        # Reset the values that are not actually in traces
        labeled_image[np.logical_not(np.isin(labeled_image, true_labels))] = 0

        # Reindex the labels to start at 1
        for i, label in enumerate(true_labels):
            labeled_image[labeled_image == label] = i + 1
        return labeled_image
Example #26
def get_y_derivativemap(flat, flat_bpix, bg_std_norm,
                        max_sep_order=150, pad=50,
                        med_filter_size=(7, 7),
                        flat_mask=None):

    """
    flat
    flat_bpix : bpix'ed flat
    """

    # 1d-derivatives along y-axis : 1st attempt
    # im_deriv = ni.gaussian_filter1d(flat, 1, order=1, axis=0)

    # 1d-derivatives along y-axis : 2nd attempt. Median filter first.

    flat_deriv_bpix = ni.gaussian_filter1d(flat_bpix, 1,
                                           order=1, axis=0)

    # We also make a median-filtered one. This one will be used to make masks.
    flat_medianed = ni.median_filter(flat,
                                     size=med_filter_size)

    flat_deriv = ni.gaussian_filter1d(flat_medianed, 1,
                                      order=1, axis=0)

    # min/max filter

    flat_max = ni.maximum_filter1d(flat_deriv, size=max_sep_order, axis=0)
    flat_min = ni.minimum_filter1d(flat_deriv, size=max_sep_order, axis=0)

    # mask for aperture boundary
    if pad is None:
        sl = slice(None)
    else:
        sl = slice(pad, -pad)

    flat_deriv_masked = np.zeros_like(flat_deriv)
    flat_deriv_masked[sl,sl] = flat_deriv[sl, sl]

    if flat_mask is not None:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5) & flat_mask
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5) & flat_mask
    else:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5)
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5)

    return dict(data=flat_deriv, #_bpix,
                pos_mask=flat_deriv_pos_msk,
                neg_mask=flat_deriv_neg_msk,
                )
Example #27
def get_y_derivativemap(flat,
                        flat_bpix,
                        bg_std_norm,
                        max_sep_order=150,
                        pad=50,
                        med_filter_size=(7, 7),
                        flat_mask=None):
    """
    flat
    flat_bpix : bpix'ed flat
    """

    # 1d-derivatives along y-axis : 1st attempt
    # im_deriv = ni.gaussian_filter1d(flat, 1, order=1, axis=0)

    # 1d-derivatives along y-axis : 2nd attempt. Median filter first.

    flat_deriv_bpix = ni.gaussian_filter1d(flat_bpix, 1, order=1, axis=0)

    # We also make a median-filtered one. This one will be used to make masks.
    flat_medianed = ni.median_filter(flat, size=med_filter_size)

    flat_deriv = ni.gaussian_filter1d(flat_medianed, 1, order=1, axis=0)

    # min/max filter

    flat_max = ni.maximum_filter1d(flat_deriv, size=max_sep_order, axis=0)
    flat_min = ni.minimum_filter1d(flat_deriv, size=max_sep_order, axis=0)

    # mask for aperture boundary
    if pad is None:
        sl = slice(None)
    else:
        sl = slice(pad, -pad)

    flat_deriv_masked = np.zeros_like(flat_deriv)
    flat_deriv_masked[sl, sl] = flat_deriv[sl, sl]

    if flat_mask is not None:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5) & flat_mask
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5) & flat_mask
    else:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5)
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5)

    return dict(
        data=flat_deriv,  #_bpix,
        pos_mask=flat_deriv_pos_msk,
        neg_mask=flat_deriv_neg_msk,
    )
Example #28
def peak_corr(pulse, receiver, name):
    corr = np.correlate(receiver, pulse, mode='valid')
    corr_abs = np.abs(corr)
    corr_max = ndimage.maximum_filter1d(corr_abs, L // 8, mode='constant')
    corr_min = ndimage.minimum_filter1d(corr_abs, L // 8, mode='constant')
    indics_mask = (corr_abs == corr_max) & (corr_max - corr_min >=
                                            .3 * signal_power * L)
    indics = np.argwhere(indics_mask).flatten()
    plt.figure(figsize=(15, 5))
    # plot received signal
    plt.subplot(121)
    plt.plot(receiver)
    plt.title(f'{name} pulse receiver')
    # plot absolute correlation
    plt.subplot(122)
    plt.plot(corr_abs)
    plt.title(
        f'absolute correlation of {name} pulse\nestimated delays: {indics}')
Example #29
def get_clips(series, threshold, warmup=30, cooldown=30):
    """
    Given a time series, a threshold, detect preiods of motion and return
    a list of (start_frame, end_frame) tuples.
    
    A motion period is padded by `warmup` frames before and `cooldown` frames
    after with motion levels below `threshold`.
    """
    maxed_series = maximum_filter1d(series,
                                    warmup + cooldown + 1,
                                    origin=(-warmup + cooldown) // 2)
    moving_frames = (maxed_series > threshold).astype('float')
    moving_frames[0] = 0
    moving_frames[-1] = 0
    on_off = np.diff(moving_frames)
    starts = np.where(on_off == 1)[0]
    stops = np.where(on_off == -1)[0]

    return zip(starts, stops)
Example #30
def local_maxima(array, min_distance=1, periodic=False, edges_allowed=True):
    """Find all local maxima of the array,
       separated by at least min_distance.
       It uses ndimage.maximum_filter1d().
    """
    import scipy.ndimage as ndimage
    array = np.asarray(array)
    cval = 0
    if periodic:
        mode = 'wrap'
    elif edges_allowed:
        mode = 'nearest'
    else:
        mode = 'constant'
        cval = array.max() + 1
    max_points = array == ndimage.maximum_filter1d(array,
                                                   1 + 2 * min_distance,
                                                   mode=mode, cval=cval)

    return [indices[max_points] for indices in np.indices(array.shape)]
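
A sketch of its output format (a list with one index array per dimension):

import numpy as np

x = np.array([0, 3, 1, 0, 2, 5, 2, 0])
print(local_maxima(x, min_distance=1))  # [array([1, 5])]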
Example #31
def findlines(x, y, fwhm, smoothwindow = 'hanning', sigma_threshold = 3.):

    '''
    Several things are tried here and I am not quite sure yet what will turn
    out to be useful:
    - smoothing: show real peaks and not just noise
    - maximum_filter == array: will find the peaks
    - sigma_clipping: are the peaks large enough to be relevant?

    Parameters
    ----------
    x : ndarray
        x values, e.g. wavelength
    y : ndarray
        y values, e.g. flux or res_flux / error
    fwhm : float
        estimate for FWHM of lines. Used as smoothing scale
    smoothwindow : string or None
        if `smoothwindow` is one of `['flat', 'hanning', 'hamming',
        'bartlett', 'blackman']` a corresponding window function
        will be used to smooth the signal before line detection.
    sigma_threshold : float
        detection threshold in units of the standard deviation of y

    Returns
    -------
    peaks : ndarray
        index numbers for peaks found
    '''
    fwhminpix = int(fwhm / np.diff(x).mean())
    if smoothwindow is not None:
        #print smoothwindow
        #print fwhminpix
        y = smooth(y, window_len = 3*fwhminpix, window = smoothwindow)

    maxindex = (maximum_filter1d(y, max(fwhminpix,3)) == y)
    maxindex = maxindex & (y > (y.mean() + sigma_threshold * y.std()))
    # sigma_clipping works only if there is plenty of continuum
    #clipped_y = sigma_clipping(y, threshold = sigma_threshold)
    # believe only peaks which are so large, that the get clipped by sigma_clipping
    #maxindex = maxindex & (clipped_y.mask == False)

    return np.flatnonzero(maxindex)
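
A sketch on synthetic data with `smoothwindow=None`, so the external `smooth` helper is not needed:

import numpy as np
from scipy.ndimage import maximum_filter1d

x = np.linspace(4000., 4100., 500)
y = np.random.default_rng(0).normal(0., 0.1, x.size)
y[100] += 5.0
y[300] += 4.0  # two injected emission lines
print(findlines(x, y, fwhm=0.8, smoothwindow=None))  # expected: [100 300]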
Example #32
def greatest_amplitude(signals, size, threshold=0.005):
    """
    Return 1D array with indices of the signal with the greatest amplitude.
    If signals.ndim == 1 or signals.shape[1] == 1 returns an array of boolean
    values which are True, if amplitude of signal is > threshold.

    Parameters
    ----------
    signals : 2D array
        2D array of signals whose amplitudes should be compared. If
        only one signal is given, its amplitude is compared to threshold.
        Otherwise, the amplitudes of the signals are compared to each other.
        First dimension datapoints, second dimension different signals.
    size : int
        Size of the window the amplitudes should be calculated from and
        compared to each other. Needs to be >= 2.
    threshold : float
        Value, the amplitude of a signal is compared to, if only one signal is
        provided.

    Returns
    -------
    1D numpy.ndarray dtype=int or dtype=bool
        Index of where amplitude of first signal (first axis) is greater than
        the other signals or True where amplitude is greater than the
        threshold.
    """
    if size <= 1:
        raise ValueError("Size should be at least 2, to calculate amplitudes "
                         "within the signals!")
    amplitude = (maximum_filter1d(signals, size, axis=0) -
                 minimum_filter1d(signals, size, axis=0))
    if signals.ndim == 1 or signals.shape[1] == 1:
        greatest = amplitude > threshold
        greatest.shape = -1
    else:
        greatest = np.argmax(amplitude, axis=1)

    return greatest
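
A sketch comparing two synthetic signals:

import numpy as np
from scipy.ndimage import maximum_filter1d, minimum_filter1d

t = np.linspace(0., 1., 100)
sigs = np.stack([0.1 * np.sin(20 * t), 0.5 * np.sin(20 * t)], axis=1)
print(np.bincount(greatest_amplitude(sigs, size=10)))
# [  0 100]: the larger-amplitude signal wins at every sample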
Example #33
def scan_blink_correction(image, axis=1):
    """
    This filter can be used to filter out impulsive noise from a 2D array along a single axis.
    As a filter we apply a sequence of two filters: first a min-filter and then a max-filter.
    This composite non-linear filter technique is also called an opening filter.

    This filter will completely remove single-pixel (along the given axis) brightness spikes
    from the image but will cause the image to be more "blocky"/less smooth.
    Of course you need to ensure that the image features of interest are larger than a single
    pixel along the filtered axis.
    @param numpy.ndarray image: A 2D numpy array to be filtered (e.g. image data)
    @param int axis: The axis along which to apply the 1D filter
    @return numpy.ndarray: The filtered image. Same dimensions as input image
    """

    if not isinstance(image, np.ndarray):
        logger.error('Image must be 2D numpy array.')
        return image
    if image.ndim != 2:
        logger.error('Image must be 2D numpy array.')
        return image
    if axis != 0 and axis != 1:
        logger.error('Optional axis parameter must be either 0 or 1.')
        return image

    # Calculate median value of the image. This value is used for padding image boundaries during
    # filtering.
    median = np.median(image)
    # Apply a minimum filter along the chosen axis.
    filt_img = minimum_filter1d(image, size=2, axis=axis, mode='constant', cval=median)
    # Apply a maximum filter along the chosen axis. Flip the previous filter result to avoid
    # translation of image features.
    filt_img = maximum_filter1d(
        np.flip(filt_img, axis), size=2, axis=axis, mode='constant', cval=median)
    # Flip back the image to obtain original orientation and return result.
    return np.flip(filt_img, axis)
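
A sketch on a toy image (the module's `logger` is assumed available; it is only hit on error paths):

import numpy as np
from scipy.ndimage import minimum_filter1d, maximum_filter1d

img = np.ones((5, 5))
img[2, 3] = 100.0  # single-pixel spike along axis 1
print(scan_blink_correction(img, axis=1).max())  # 1.0 -- the spike is gone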
Example #34
def _prominent_peaks(image,
                     min_xdistance=1,
                     min_ydistance=1,
                     threshold=None,
                     num_peaks=np.inf):
    """Return peaks with non-maximum suppression.

    Identifies most prominent features separated by certain distances.
    Non-maximum suppression with different sizes is applied separately
    in the first and second dimension of the image to identify peaks.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_xdistance : int
        Minimum distance separating features in the x dimension.
    min_ydistance : int
        Minimum distance separating features in the y dimension.
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(image)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    intensity, xcoords, ycoords : tuple of array
        Peak intensity values, x and y indices.
    """

    img = image.copy()
    rows, cols = img.shape

    if threshold is None:
        threshold = 0.5 * np.max(img)

    ycoords_size = 2 * min_ydistance + 1
    xcoords_size = 2 * min_xdistance + 1
    img_max = ndi.maximum_filter1d(img,
                                   size=ycoords_size,
                                   axis=0,
                                   mode='constant',
                                   cval=0)
    img_max = ndi.maximum_filter1d(img_max,
                                   size=xcoords_size,
                                   axis=1,
                                   mode='constant',
                                   cval=0)
    mask = (img == img_max)
    img *= mask
    img_t = img > threshold

    label_img = measure.label(img_t)
    props = measure.regionprops(label_img, img_max)

    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)

    img_peaks = []
    ycoords_peaks = []
    xcoords_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,
                                        -min_xdistance:min_xdistance + 1]

    for ycoords_idx, xcoords_idx in coords:
        accum = img_max[ycoords_idx, xcoords_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            ycoords_nh = ycoords_idx + ycoords_ext
            xcoords_nh = xcoords_idx + xcoords_ext

            # no reflection for distance neighbourhood
            ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)
            ycoords_nh = ycoords_nh[ycoords_in]
            xcoords_nh = xcoords_nh[ycoords_in]

            # reflect xcoords and assume xcoords are continuous,
            # e.g. for angles:
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            xcoords_low = xcoords_nh < 0
            ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]
            xcoords_nh[xcoords_low] += cols
            xcoords_high = xcoords_nh >= cols
            ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]
            xcoords_nh[xcoords_high] -= cols

            # suppress neighbourhood
            img_max[ycoords_nh, xcoords_nh] = 0

            # add current feature to peaks
            img_peaks.append(accum)
            ycoords_peaks.append(ycoords_idx)
            xcoords_peaks.append(xcoords_idx)

    img_peaks = np.array(img_peaks)
    ycoords_peaks = np.array(ycoords_peaks)
    xcoords_peaks = np.array(xcoords_peaks)

    if num_peaks < len(img_peaks):
        idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]
        img_peaks = img_peaks[idx_maxsort]
        ycoords_peaks = ycoords_peaks[idx_maxsort]
        xcoords_peaks = xcoords_peaks[idx_maxsort]

    return img_peaks, xcoords_peaks, ycoords_peaks
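
A sketch with two injected peaks (assuming the module imports `numpy as np`, `scipy.ndimage as ndi`, and `skimage.measure`, and a skimage version whose `regionprops` still exposes `max_intensity`):

import numpy as np

img = np.zeros((20, 20))
img[5, 5] = 2.0
img[15, 12] = 1.5
intensity, xc, yc = _prominent_peaks(img, min_xdistance=3, min_ydistance=3)
print(intensity, xc, yc)  # both peaks survive the non-maximum suppression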
Example #35
data = pd.read_csv(args.file[0], sep='\t', header=None, names=range(8))

# retain the necessary data
acceleration = data[data[1] == 'ACC'][[0, 2, 3, 4]].to_numpy(dtype=float)

# print(acceleration)

# Calculate acceleration magnitude and Subtract gravity
start_time = acceleration[0, 0] / 1000
time = acceleration[:, 0] / 1000 - start_time
magnitude = np.abs(np.sqrt(np.sum(acceleration[:, 1:4]**2, 1)) - 9.81)

# smooth the data with maximum filter and then gaussian filter
# max_magnitude = maximum_filter1d(magnitude, 50)
# smoothed_magnitude = gaussian_filter1d(magnitude, 100)
max_smooth_magnitude = gaussian_filter1d(maximum_filter1d(magnitude, 50), 100)

plt.plot(time, magnitude)
# plt.plot(time, max_magnitude, label='max')
# plt.plot(time, smoothed_magnitude, label='smoothed')
plt.plot(time, max_smooth_magnitude, label='max smooth')
plt.show()

# set threshold
running_threshold = 20
walking_threshold = 5
time_threshold = 10

# discriminate data and visualize the results
current_state = None
start = 0
Example #36
def hough_line_peaks(hspace,
                     angles,
                     dists,
                     min_distance=9,
                     min_angle=10,
                     threshold=None,
                     num_peaks=np.inf):
    """Return peaks in hough transform.

    Identifies most prominent lines separated by a certain angle and distance
    in a hough transform. Non-maximum suppression with different sizes is
    applied separately in the first (distances) and second (angles) dimension
    of the hough space to identify peaks.

    Parameters
    ----------
    hspace : (N, M) array
        Hough space returned by the `hough_line` function.
    angles : (M,) array
        Angles returned by the `hough_line` function. Assumed to be continuous.
        (`angles[-1] - angles[0] == PI`).
    dists : (N, ) array
        Distances returned by the `hough_line` function.
    min_distance : int
        Minimum distance separating lines (maximum filter size for first
        dimension of hough space).
    min_angle : int
        Minimum angle separating lines (maximum filter size for second
        dimension of hough space).
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    hspace, angles, dists : tuple of array
        Peak values in hough space, angles and distances.

    Examples
    --------
    >>> from skimage.transform import hough_line, hough_line_peaks
    >>> from skimage.draw import line
    >>> img = np.zeros((15, 15), dtype=np.bool_)
    >>> rr, cc = line(0, 0, 14, 14)
    >>> img[rr, cc] = 1
    >>> rr, cc = line(0, 14, 14, 0)
    >>> img[cc, rr] = 1
    >>> hspace, angles, dists = hough_line(img)
    >>> hspace, angles, dists = hough_line_peaks(hspace, angles, dists)
    >>> len(angles)
    2

    """

    hspace = hspace.copy()
    rows, cols = hspace.shape

    if threshold is None:
        threshold = 0.5 * np.max(hspace)

    distance_size = 2 * min_distance + 1
    angle_size = 2 * min_angle + 1
    hspace_max = ndi.maximum_filter1d(hspace,
                                      size=distance_size,
                                      axis=0,
                                      mode='constant',
                                      cval=0)
    hspace_max = ndi.maximum_filter1d(hspace_max,
                                      size=angle_size,
                                      axis=1,
                                      mode='constant',
                                      cval=0)
    mask = (hspace == hspace_max)
    hspace *= mask
    hspace_t = hspace > threshold

    label_hspace = measure.label(hspace_t)
    props = measure.regionprops(label_hspace, hspace_max)

    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)

    hspace_peaks = []
    dist_peaks = []
    angle_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    dist_ext, angle_ext = np.mgrid[-min_distance:min_distance + 1,
                                   -min_angle:min_angle + 1]

    for dist_idx, angle_idx in coords:
        accum = hspace_max[dist_idx, angle_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            dist_nh = dist_idx + dist_ext
            angle_nh = angle_idx + angle_ext

            # no reflection for distance neighbourhood
            dist_in = np.logical_and(dist_nh > 0, dist_nh < rows)
            dist_nh = dist_nh[dist_in]
            angle_nh = angle_nh[dist_in]

            # reflect angles and assume angles are continuous, e.g.
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            angle_low = angle_nh < 0
            dist_nh[angle_low] = rows - dist_nh[angle_low]
            angle_nh[angle_low] += cols
            angle_high = angle_nh >= cols
            dist_nh[angle_high] = rows - dist_nh[angle_high]
            angle_nh[angle_high] -= cols

            # suppress neighbourhood
            hspace_max[dist_nh, angle_nh] = 0

            # add current line to peaks
            hspace_peaks.append(accum)
            dist_peaks.append(dists[dist_idx])
            angle_peaks.append(angles[angle_idx])

    hspace_peaks = np.array(hspace_peaks)
    dist_peaks = np.array(dist_peaks)
    angle_peaks = np.array(angle_peaks)

    if num_peaks < len(hspace_peaks):
        idx_maxsort = np.argsort(hspace_peaks)[::-1][:num_peaks]
        hspace_peaks = hspace_peaks[idx_maxsort]
        dist_peaks = dist_peaks[idx_maxsort]
        angle_peaks = angle_peaks[idx_maxsort]

    return hspace_peaks, angle_peaks, dist_peaks
Example #37
def transform_rois(
    roi_file,
    source_image_filename,
    destination_image_filename,
    control_point_file,
    output_filename,
    temp_output_filename,
    log_file_path,
    error_file_path,
    roi_reference_image=None,
    selem_size=15,
    nii_scale=None,
    transformation_matrix=None,
    debug=False,
    print_value_round_decimals=2,
    z_filter_padding=2,
):
    """
    Using a source image (e.g. downsampled stack), transform an ImageJ
    zipped collection of ROIs into the coordinate space of a
    destination image (e.g. an atlas), using the inverse control point file
    from an existing niftyreg registration

    :param roi_file: .zip collection of ImageJ ROIs
    :param source_image_filename: Image that the ROIs are defined in
    :param destination_image_filename: Image in the destination coordinate space
    :param control_point_file: Transformation from source to destination
    :param output_filename: output filename for the resulting nifti file
    :param temp_output_filename: Temporary file for registration
    :param log_file_path: Path to save niftyreg logs
    :param error_file_path: Path to save niftyreg errors
    :param roi_reference_image: Image on which the ROIs are defined (if not the
    downsampled image in the registration directory)
    :param selem_size: Structure element size for closing
    :param nii_scale: Scaling to correctly save the temporary nifti image
    :param transformation_matrix: Affine transform for the temporary nifti
    image
    :param print_value_round_decimals: How many decimal places to round
    values printed to console.
    :param z_filter_padding: Size of the filter in z when correcting for
    unlabeled slices.
    :param debug: If True, don't delete temporary files
    """

    print("Loading ROIs")
    rois = read_roi_zip(roi_file)
    number_rois = len(rois)
    print(f"{number_rois} rois found")
    x = []
    y = []
    z = []
    for key in rois:
        for position in range(0, len(rois[key]["x"])):
            x.append(rois[key]["x"][position])
            y.append(rois[key]["y"][position])
            z.append(rois[key]["position"])

    print("Loading downsampled image image")
    downsampled_source_image = brainio.load_any(str(source_image_filename))

    print(f"Source image size: "
          f"x:{downsampled_source_image.shape[0]}, "
          f"y:{downsampled_source_image.shape[1]}, "
          f"y:{downsampled_source_image.shape[2]}")

    downsampled_source_image[:] = 0

    if roi_reference_image is not None:
        print("Reference image flag used. Loading reference image")
        reference_image_shape = brainio.get_size_image_from_file_paths(
            roi_reference_image)

        print(f"Reference image shape is "
              f"x:{reference_image_shape['x']}, "
              f"y:{reference_image_shape['y']}, "
              f"z:{reference_image_shape['z']}")

        x_downsample_factor = (reference_image_shape["x"] /
                               downsampled_source_image.shape[0])
        y_downsample_factor = (reference_image_shape["y"] /
                               downsampled_source_image.shape[1])
        z_downsample_factor = (reference_image_shape["z"] /
                               downsampled_source_image.shape[2])

        print(f"ROIs will be downsampled by a factor of "
              f"x:{round(x_downsample_factor, print_value_round_decimals)}, "
              f"y:{round(y_downsample_factor, print_value_round_decimals)}, "
              f"z:{round(z_downsample_factor, print_value_round_decimals)}")

    # TODO: optimise this
    print("Creating temporary ROI image")
    for position in range(0, len(x)):
        if roi_reference_image is None:
            downsampled_source_image[x[position], y[position], z[position]] = 1
        else:
            x_scale = int(round(x[position] / x_downsample_factor))
            y_scale = int(round(y[position] / y_downsample_factor))
            z_scale = int(round(z[position] / z_downsample_factor))
            downsampled_source_image[x_scale, y_scale, z_scale] = 1

    print("Cleaning up ROI image")
    # TODO speed this up - parallelise?
    selem = morphology.selem.square(selem_size)
    for plane in tqdm(range(0, downsampled_source_image.shape[2])):
        tmp = morphology.binary.binary_closing(downsampled_source_image[:, :,
                                                                        plane],
                                               selem=selem)
        tmp = morphology.convex_hull_object(tmp)
        downsampled_source_image[:, :,
                                 plane] = morphology.binary.binary_closing(
                                     tmp, selem=selem)

    if roi_reference_image is not None:
        if z_downsample_factor < 1:
            print("ROI was defined at a lower z-resolution than the atlas. "
                  "Correcting with a maximum filter")
            # use the z downsampling factor (z_scale is a leftover loop variable)
            z_filter_size = int(round(1 / z_downsample_factor)) + z_filter_padding
            downsampled_source_image = maximum_filter1d(
                downsampled_source_image, z_filter_size, axis=2)

    print(f"Saving temporary ROI image at: {temp_output_filename}")
    brainio.to_nii(
        downsampled_source_image,
        str(temp_output_filename),
        scale=nii_scale,
        affine_transform=transformation_matrix,
    )

    print("Preparing ROI registration")
    nifty_reg_binaries_folder = get_niftyreg_binaries()
    program_path = get_binary(nifty_reg_binaries_folder, PROGRAM_NAME)

    reg_cmd = prepare_segmentation_cmd(
        program_path,
        temp_output_filename,
        output_filename,
        destination_image_filename,
        control_point_file,
    )
    print("Running ROI registration")
    try:
        safe_execute_command(reg_cmd, log_file_path, error_file_path)
    except SafeExecuteCommandError as err:
        raise RegistrationError("ROI registration failed; {}".format(err))

    print(f"Registered ROI image can be found at {output_filename}")

    if not debug:
        print("Deleting temporary files")
        remove(temp_output_filename)
        remove(log_file_path)
        remove(error_file_path)
Example #38
from scipy.ndimage import maximum_filter1d

max_Filt_cut = 20
slcA = slice(124, 149)  # low energy
slcB = slice(740, 779)  # high energy
thresh = 2

print "Analyzing spectra"
some_good_hits = []
hits_idx = data.keys()
for h in hits_idx:
    spec = data[h]["spectrum"]
    rmsd_score = data[h]['best']

    # find the good spectra
    spec_filt = maximum_filter1d(spec, max_Filt_cut)
    specA = spec_filt[slcA]
    specB = spec_filt[slcB]
    spec_bkgrnd = np.median(spec_filt)

    sigA = specA.max() - spec_bkgrnd
    sigB = specB.max() - spec_bkgrnd

    if spec_filt[slcA].max() > thresh or spec_filt[slcB].max() > thresh:
        some_good_hits.append(h)

    data[h]["spec_filt"] = spec_filt
    data[h]["sigA"] = sigA
    data[h]["sigB"] = sigB
Example #39
def _prominent_peaks(image, min_xdistance=1, min_ydistance=1,
                     threshold=None, num_peaks=np.inf):
    """Return peaks with non-maximum suppression.

    Identifies most prominent features separated by certain distances.
    Non-maximum suppression with different sizes is applied separately
    in the first and second dimension of the image to identify peaks.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_xdistance : int
        Minimum distance separating features in the x dimension.
    min_ydistance : int
        Minimum distance separating features in the y dimension.
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(image)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    intensity, xcoords, ycoords : tuple of array
        Peak intensity values, x and y indices.
    """

    img = image.copy()
    rows, cols = img.shape

    if threshold is None:
        threshold = 0.5 * np.max(img)

    ycoords_size = 2 * min_ydistance + 1
    xcoords_size = 2 * min_xdistance + 1
    img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,
                                   mode='constant', cval=0)
    img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,
                                   mode='constant', cval=0)
    mask = (img == img_max)
    img *= mask
    img_t = img > threshold

    label_img = measure.label(img_t)
    props = measure.regionprops(label_img, img_max)

    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)

    img_peaks = []
    ycoords_peaks = []
    xcoords_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,
                                        -min_xdistance:min_xdistance + 1]

    for ycoords_idx, xcoords_idx in coords:
        accum = img_max[ycoords_idx, xcoords_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            ycoords_nh = ycoords_idx + ycoords_ext
            xcoords_nh = xcoords_idx + xcoords_ext

            # no reflection for distance neighbourhood
            ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)
            ycoords_nh = ycoords_nh[ycoords_in]
            xcoords_nh = xcoords_nh[ycoords_in]

            # reflect xcoords and assume xcoords are continuous,
            # e.g. for angles:
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            xcoords_low = xcoords_nh < 0
            ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]
            xcoords_nh[xcoords_low] += cols
            xcoords_high = xcoords_nh >= cols
            ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]
            xcoords_nh[xcoords_high] -= cols

            # suppress neighbourhood
            img_max[ycoords_nh, xcoords_nh] = 0

            # add current feature to peaks
            img_peaks.append(accum)
            ycoords_peaks.append(ycoords_idx)
            xcoords_peaks.append(xcoords_idx)

    img_peaks = np.array(img_peaks)
    ycoords_peaks = np.array(ycoords_peaks)
    xcoords_peaks = np.array(xcoords_peaks)

    if num_peaks < len(img_peaks):
        idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]
        img_peaks = img_peaks[idx_maxsort]
        ycoords_peaks = ycoords_peaks[idx_maxsort]
        xcoords_peaks = xcoords_peaks[idx_maxsort]

    return img_peaks, xcoords_peaks, ycoords_peaks
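
# A small sketch (synthetic input, not from the original) of the separable
# non-maximum suppression used above: a pixel survives only if it equals the
# maximum of its neighbourhood, computed with 1D maximum filters along axis 0
# and then axis 1.
import numpy as np
from scipy import ndimage as ndi

img = np.zeros((9, 9))
img[2, 3] = 5.0
img[6, 7] = 3.0
img_max = ndi.maximum_filter1d(img, size=3, axis=0, mode='constant', cval=0)
img_max = ndi.maximum_filter1d(img_max, size=3, axis=1, mode='constant', cval=0)
print(np.argwhere((img == img_max) & (img > 0)))  # [[2 3] [6 7]]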
Exemplo n.º 40
0
def smooth(self, chromatogram: list):
    # suppress narrow dips with a 3-sample moving maximum,
    # then smooth the envelope with a Gaussian (sigma=2)
    maxi = maximum_filter1d(chromatogram[1], 3)
    gauss = gaussian_filter1d(maxi, 2)
    return [chromatogram[0], gauss]
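
# A self-contained usage sketch (synthetic trace) of the smoothing above:
# the 3-sample moving maximum fills narrow dropouts before the Gaussian
# smooths the resulting envelope.
import numpy as np
from scipy.ndimage import maximum_filter1d, gaussian_filter1d

t = np.linspace(0.0, 1.0, 200)
y = np.exp(-((t - 0.5) / 0.05) ** 2)
y[::17] = 0.0                           # simulate narrow dropouts
envelope = gaussian_filter1d(maximum_filter1d(y, 3), 2)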
Exemplo n.º 41
0
def create_samples_from_sequence(h5file,
                                 sun3d_data_path,
                                 seq_name,
                                 baseline_range,
                                 sharpness,
                                 sharpness_window=30):
    """Read a sun3d sequence and write samples to the h5file
    
    h5file: h5py.File handle
    
    sun3d_data_path: str
        base path to the sun3d data

    seq_name: str
        the name of the sequence e.g. "mit_32_d463/d463_1"

    baseline_range: tuple(float,float)
        The allowed baseline range

    sharpness: numpy.ndarray 1D
        Array with the sharpness score for each image

    sharpness_window: int
        Window for detecting sharp images

    Returns the number of generated groups
    """
    generated_groups = 0
    seq_path = os.path.join(sun3d_data_path, seq_name)
    group_prefix = seq_name.replace('/', '.')
    if not os.path.exists(os.path.join(seq_path, 'extrinsics')):
        return 0

    # file list
    image_files = [
        f for f in sorted(os.listdir(os.path.join(seq_path, 'image')))
        if f.endswith('.jpg')
    ]
    depth_files = [
        f for f in sorted(os.listdir(os.path.join(seq_path, 'depthTSDF')))
        if f.endswith('.png')
    ]
    extrinsics_files = [
        f for f in sorted(os.listdir(os.path.join(seq_path, 'extrinsics')))
        if f.endswith('.txt')
    ]

    # read intrinsics
    intrinsics = np.loadtxt(os.path.join(seq_path, 'intrinsics.txt'))

    # read extrinsics params
    extrinsics = np.loadtxt(
        os.path.join(seq_path, 'extrinsics', extrinsics_files[-1]))

    # read time stamp
    img_ids, img_timestamps = read_frameid_timestamp(image_files)
    _, depth_timestamps = read_frameid_timestamp(depth_files)

    # find a depth for each image
    idx_img2depth = []
    for img_timestamp in img_timestamps:
        idx_img2depth.append(
            np.argmin(abs(depth_timestamps[:] - img_timestamp)))

    # find sharp images with nonmaximum suppression
    assert sharpness.size == len(image_files)
    sharpness_maxfilter = maximum_filter1d(np.asarray(sharpness),
                                           size=sharpness_window,
                                           mode='constant',
                                           cval=0)
    sharp_images_index = np.where(sharpness == sharpness_maxfilter)[0]

    used_views = set()
    for i1, frame_idx1 in enumerate(sharp_images_index):
        if i1 in used_views:
            continue

        R1, t1 = read_Rt(extrinsics, frame_idx1)

        depth_file = os.path.join(seq_path, 'depthTSDF',
                                  depth_files[idx_img2depth[frame_idx1]])
        depth1 = read_depth(depth_file)

        if np.count_nonzero(np.isfinite(depth1)
                            & (depth1 > 0)) < 0.5 * depth1.size:
            continue

        image1 = read_image(
            os.path.join(seq_path, 'image', image_files[frame_idx1]))
        view1 = View(R=R1,
                     t=t1,
                     K=intrinsics,
                     image=image1,
                     depth=depth1,
                     depth_metric='camera_z')

        views = [view1]
        used_views.add(i1)

        for i2 in range(i1 + 1, sharp_images_index.size):
            frame_idx2 = sharp_images_index[i2]
            R2, t2 = read_Rt(extrinsics, frame_idx2)
            baseline = np.linalg.norm(t1 - t2)  # unit is meters
            if baseline < baseline_range[0] or baseline > baseline_range[1]:
                continue

            cosine = np.dot(R1[2, :], R2[2, :])
            if cosine < math.cos(math.radians(70)):
                continue

            depth_file = os.path.join(seq_path, 'depthTSDF',
                                      depth_files[idx_img2depth[frame_idx2]])
            depth2 = read_depth(depth_file)

            if np.count_nonzero(np.isfinite(depth2)
                                & (depth2 > 0)) < 0.5 * depth2.size:
                continue

            view2 = View(R=R2,
                         t=t2,
                         K=intrinsics,
                         image=None,
                         depth=depth2,
                         depth_metric='camera_z')
            check_params = {
                'min_valid_threshold': 0.4,
                'min_depth_consistent': 0.2
            }
            if check_depth_consistency(
                    view1, [view2], **
                    check_params) and check_depth_consistency(
                        view2, [view1], **check_params):
                image2 = read_image(
                    os.path.join(seq_path, 'image', image_files[frame_idx2]))
                view2 = view2._replace(image=image2)
                views.append(view2)
                used_views.add(i2)
                # print(baseline, cosine)

        if len(views) > 1:
            group_name = group_prefix + '-{:07d}'.format(img_ids[frame_idx1])
            print('writing', group_name)

            view_pairs = []
            for pair in itertools.product(range(len(views)), repeat=2):
                if pair[0] != pair[1]:
                    baseline = np.linalg.norm(views[pair[0]].t -
                                              views[pair[1]].t)
                    if baseline_range[0] <= baseline <= baseline_range[1]:
                        view_pairs.extend(pair)
            for i, v in enumerate(views):
                view_group = h5file.require_group(group_name +
                                                  '/frames/t0/v{0}'.format(i))
                write_view(view_group, v)

            # write valid image pair combinations to the group t0
            viewpoint_pairs = np.array(view_pairs, dtype=np.int32)
            time_group = h5file[group_name]['frames/t0']
            time_group.attrs['viewpoint_pairs'] = viewpoint_pairs
            generated_groups += 1

    return generated_groups
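
# Standalone sketch (synthetic sharpness values) of the sharp-frame selection
# above: a frame is kept when its score equals the moving maximum over the
# window, i.e. it is a local maximum within `sharpness_window` frames.
import numpy as np
from scipy.ndimage import maximum_filter1d

sharpness = np.array([1., 3., 2., 0., 5., 4., 4., 6., 1., 2.])
filt = maximum_filter1d(sharpness, size=3, mode='constant', cval=0)
print(np.where(sharpness == filt)[0])   # [1 4 7 9]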
Exemplo n.º 42
0
def hough_peaks(hspace, angles, dists, min_distance=10, min_angle=10,
                threshold=None, num_peaks=np.inf):
    """Return peaks in hough transform.

    Identifies most prominent lines separated by a certain angle and distance in
    a hough transform. Non-maximum suppression with different sizes is applied
    separately in the first (distances) and second (angles) dimension of the
    hough space to identify peaks.

    Parameters
    ----------
    hspace : (N, M) array
        Hough space returned by the `hough` function.
    angles : (M,) array
        Angles returned by the `hough` function. Assumed to be continuous
        (`angles[-1] - angles[0] == PI`).
    dists : (N, ) array
        Distances returned by the `hough` function.
    min_distance : int
        Minimum distance separating lines (maximum filter size for the first
        dimension of the Hough space).
    min_angle : int
        Minimum angle separating lines (maximum filter size for the second
        dimension of the Hough space).
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    hspace, angles, dists : tuple of array
        Peak values in Hough space, angles and distances.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.transform import hough, hough_peaks
    >>> from skimage.draw import line
    >>> img = np.zeros((15, 15), dtype=np.bool_)
    >>> rr, cc = line(0, 0, 14, 14)
    >>> img[rr, cc] = 1
    >>> rr, cc = line(0, 14, 14, 0)
    >>> img[cc, rr] = 1
    >>> hspace, angles, dists = hough(img)
    >>> hspace, angles, dists = hough_peaks(hspace, angles, dists)
    >>> angles
    array([  0.74590887,  -0.79856126])
    >>> dists
    array([  10.74418605,  0.51162791])

    """

    hspace = hspace.copy()
    rows, cols = hspace.shape

    if threshold is None:
        threshold = 0.5 * np.max(hspace)

    distance_size = 2 * min_distance + 1
    angle_size = 2 * min_angle + 1
    hspace_max = ndimage.maximum_filter1d(hspace, size=distance_size, axis=0,
                                          mode='constant', cval=0)
    hspace_max = ndimage.maximum_filter1d(hspace_max, size=angle_size, axis=1,
                                          mode='constant', cval=0)
    mask = (hspace == hspace_max)
    hspace *= mask
    hspace_t = hspace > threshold

    label_hspace = measure.label(hspace_t)
    props = measure.regionprops(label_hspace)
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)

    hspace_peaks = []
    dist_peaks = []
    angle_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    dist_ext, angle_ext = np.mgrid[-min_distance:min_distance + 1,
                                   -min_angle:min_angle + 1]

    for dist_idx, angle_idx in coords:
        accum = hspace[dist_idx, angle_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            dist_nh = dist_idx + dist_ext
            angle_nh = angle_idx + angle_ext

            # no reflection for distance neighbourhood
            dist_in = np.logical_and(dist_nh > 0, dist_nh < rows)
            dist_nh = dist_nh[dist_in]
            angle_nh = angle_nh[dist_in]

            # reflect angles and assume angles are continuous, e.g.
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            angle_low = angle_nh < 0
            dist_nh[angle_low] = rows - dist_nh[angle_low]
            angle_nh[angle_low] += cols
            angle_high = angle_nh >= cols
            dist_nh[angle_high] = rows - dist_nh[angle_high]
            angle_nh[angle_high] -= cols

            # suppress neighbourhood
            hspace[dist_nh, angle_nh] = 0

            # add current line to peaks
            hspace_peaks.append(accum)
            dist_peaks.append(dists[dist_idx])
            angle_peaks.append(angles[angle_idx])

    hspace_peaks = np.array(hspace_peaks)
    dist_peaks = np.array(dist_peaks)
    angle_peaks = np.array(angle_peaks)

    if num_peaks < len(hspace_peaks):
        idx_maxsort = np.argsort(hspace_peaks)[::-1][:num_peaks]
        hspace_peaks = hspace_peaks[idx_maxsort]
        dist_peaks = dist_peaks[idx_maxsort]
        angle_peaks = angle_peaks[idx_maxsort]

    return hspace_peaks, angle_peaks, dist_peaks
Exemplo n.º 43
0
# The sharp lines are caused by water vapor, and the wider lines come from a
# polystyrene calibration film.
#
# To extract the real absorption spectrum we can measure the spectrum without
# the film and calculate the absorption. It is also possible to subtract the
# baseline. This approach also allows us to use the water-vapor lines for
# calibration. Note that the latter depend on humidity and temperature, and
# the baseline approach presented here is only an approximation.
#
# We approximate the baseline by taking the local maxima and interpolating
# between them.

import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as nd
from scipy.interpolate import interp1d
for ch in [63, 58]:
    fig, ax = plt.subplots()
    back = nd.maximum_filter1d(pr[:, ch], 15)
    idx = back == pr[:, ch]

    idx[:200] = False
    idx[-100:] = False
    touching = back[idx]
    f = interp1d(cwl[idx], touching, bounds_error=False, kind='linear')
    plt.plot(cwl, f(cwl))
    plt.plot(cwl, pr[:, ch])
    plt.plot(cwl, np.interp(cwl, cwl[idx], touching) - pr[:, ch] + 5000, lw=1)

    # Load water vapor data
    p = data_io.get_example_path('vapor')
    ftir_x, ftir_vapor = np.load(p).T

    # Convolve vapor spectrum with a gaussian
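
# Self-contained sketch (synthetic data, assumed shapes) of the baseline
# construction above: take the points where the signal touches its moving
# maximum and interpolate between them.
import numpy as np
from scipy import ndimage as nd
from scipy.interpolate import interp1d

x = np.linspace(0.0, 10.0, 1000)
smooth_bg = 2.0 + 0.2 * np.sin(2.0 * x)              # slowly varying baseline
line = 0.5 * np.exp(-((x - 5.0) / 0.03) ** 2)        # one narrow absorption line
signal = smooth_bg - line
back = nd.maximum_filter1d(signal, 25)
idx = back == signal                                 # points on the upper envelope
baseline = interp1d(x[idx], signal[idx], bounds_error=False,
                    fill_value='extrapolate')
absorption = baseline(x) - signal                    # line depth vs. baseline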
Exemplo n.º 44
0
def get_features(sig, sensor_id):
    """Analysis of a signal. Extracts temporal and frequency-domain features.
    Returns a pandas DataFrame."""

    fourier = fftpack.fft(sig.values)
    real, imag = np.real(fourier), np.imag(fourier)

    # Temporal data
    features = {}
    features[f"{sensor_id}_mean"] = [sig.mean()]
    features[f"{sensor_id}_var"] = [sig.var()]
    features[f"{sensor_id}_skew"] = [sig.skew()]
    features[f"{sensor_id}_delta"] = [sig.max() - sig.min()]
    features[f"{sensor_id}_mad"] = [sig.mad()]
    features[f"{sensor_id}_kurtosis"] = [sig.kurtosis()]
    features[f"{sensor_id}_sem"] = [sig.sem()]
    features[f"{sensor_id}_q5"] = [np.quantile(sig, 0.05)]
    features[f"{sensor_id}_q25"] = [np.quantile(sig, 0.25)]
    features[f"{sensor_id}_q75"] = [np.quantile(sig, 0.75)]
    features[f"{sensor_id}_q95"] = [np.quantile(sig, 0.95)]
    grad_rol_max = maximum_filter1d(np.gradient(np.abs(sig.values)), 50)
    delta = np.max(grad_rol_max) - np.min(grad_rol_max)
    features[f"{sensor_id}_grmax_delta"] = [delta]

    # Frequency-domain features
    features[f"{sensor_id}_real_mean"] = [real.mean()]
    features[f"{sensor_id}_real_var"] = [real.var()]
    features[f"{sensor_id}_real_delta"] = [real.max() - real.min()]

    features[f"{sensor_id}_imag_mean"] = [imag.mean()]
    features[f"{sensor_id}_imag_var"] = [imag.var()]
    features[f"{sensor_id}_imag_delta"] = [imag.max() - imag.min()]

    features[f"{sensor_id}_nb_peak"] = fc.number_peaks(sig.values, 2)
    features[f"{sensor_id}_median_roll_std"] = np.median(
        pd.Series(sig).rolling(50).std().dropna().values)
    features[f"{sensor_id}_autocorr5"] = fc.autocorrelation(sig, 5)

    # Added 16
    features[f"{sensor_id}_nb_peak_3"] = fc.number_peaks(sig.values, 3)
    features[f"{sensor_id}_absquant95"] = np.quantile(np.abs(sig), 0.95)

    try:
        # Mel-frequency cepstral coefficients
        mfcc_mean = mfcc(sig.values).mean(axis=1)
        for i in range(20):
            features[f"{sensor_id}_mfcc_mean_{i}"] = mfcc_mean[i]
        # Contrast spectral
        spec_contrast = spectral_contrast(sig.values).mean(axis=1)
        for i in range(7):
            features[f"{sensor_id}_lib_spec_cont_{i}"] = spec_contrast[i]
        features[f"{sensor_id}_zero_cross"] = zero_crossing_rate(sig)[0].mean()
        # Added 16
        features[f"{sensor_id}_percentile_roll20_std_50"] = np.percentile(
            sig.rolling(20).std().dropna().values, 50)

    except Exception:
        # the spectral features above are optional; skip them on failure
        pass


# =============================================================================
# fftrhann20000 = np.sum(np.abs(np.fft.fft(np.hanning(len(z))*z)[:20000]))
# fftrhann20000_denoise = np.sum(np.abs(np.fft.fft(np.hanning(len(z))*den_sample)[:20000]))
# fftrhann20000_diff_rate = (fftrhann20000 - fftrhann20000_denoise)/fftrhann20000
# X['LGBM_fftrhann20000_diff_rate'] = fftrhann20000_diff_rate
# =============================================================================
    return pd.DataFrame.from_dict(features)
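
# Isolated sketch (synthetic signal) of the gradient rolling-maximum feature
# computed above, without the pandas or audio-feature dependencies:
import numpy as np
from scipy.ndimage import maximum_filter1d

sig = np.random.default_rng(0).normal(size=1000)
grad_rol_max = maximum_filter1d(np.gradient(np.abs(sig)), 50)
grmax_delta = grad_rol_max.max() - grad_rol_max.min()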