Example #1
def bandpass(image, lshort, llong, threshold=None, truncate=4):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation relies on scipy.ndimage.filters.gaussian_filter, and it
    is the fastest way known to the authors of performing a bandpass in
    Python.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    legacy_bandpass, legacy_bandpass_fftw
    """
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x * 2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more" +
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1 / 256.
    boxcar = image.copy()
    result = np.array(image, dtype=float)
    for axis, (sigma, smoothing) in enumerate(zip(lshort, llong)):
        if smoothing > 1:
            uniform_filter1d(boxcar,
                             2 * smoothing + 1,
                             axis,
                             output=boxcar,
                             mode='nearest',
                             cval=0)
        if sigma > 0:
            correlate1d(result,
                        gaussian_kernel(sigma, truncate),
                        axis,
                        output=result,
                        mode='constant',
                        cval=0.0)
    result -= boxcar
    return np.where(result > threshold, result, 0)
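A minimal usage sketch (hypothetical image and length scales; assumes the helpers used above, validate_tuple and gaussian_kernel, are importable, e.g. from trackpy):

import numpy as np

# Hypothetical test image: a small bright blob on a sloped background plus noise.
rng = np.random.default_rng(0)
yy, xx = np.mgrid[0:64, 0:64]
image = 0.1 * xx + np.exp(-((xx - 32)**2 + (yy - 32)**2) / 8.0) \
        + rng.normal(0, 0.01, (64, 64))
# Keep features between ~1 px (noise) and ~9 px (background); threshold defaults to 1/256.
clean = bandpass(image, lshort=1, llong=9)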
Example #2
def types_temp_draw3(all_recs):
    all_recs = types_draw  # note: the argument is overridden by the module-level types_draw
    # maxi=[]
    # for rec in all_recs:
    #     maxi.append(max(rec[1]))
    # print 'Max value of y-axis is %d' %(max(maxi))

    x_ticks=['sRequest','Paging', 'Attach']

    with PdfPages('types_temp3.pdf') as pdf:
        font_size='30'
        fig = plt.figure()
        ax=fig.add_subplot(111)
        ax.plot(np.arange(0,288), uniform_filter1d(all_recs[0],2),'r-',marker='D',markersize=5, markevery=7,linewidth=2)
        ax.plot(np.arange(0,288), uniform_filter1d(all_recs[1],2),'b--',marker='o',markersize=5, markevery=7,linewidth=2)
        ax.plot(np.arange(0,288), uniform_filter1d(all_recs[2],2),'g:',marker='s',markersize=5, markevery=7,linewidth=2)


        plt.subplots_adjust(top=0.95, right=0.9, left=0.21, bottom=0.21)
        plt.xticks(np.arange(0,289,step=48),[0,4,8,12,16,20,24],size=font_size,fontweight='bold')
        # plt.yticks(size=font_size,fontweight='bold')
        # superseded by the yticks call below
        # plt.yticks(np.arange(0,1.1,step=0.2),[0,0.2,0.4,0.6,0.8,1],size=font_size,fontweight='bold')

        plt.yticks(np.arange(0,35001,step=5000),[0,5,10,15,20,25,30,35],size=font_size,fontweight='bold')
        plt.xlabel('Time of Day (h)', fontsize=font_size,fontweight='bold')
        plt.ylabel('# of Records (k)',fontsize=font_size,fontweight='bold')
        # plt.ylim(0,100)
        plt.legend(x_ticks,fontsize=15, loc='lower right')
        plt.xlim(0,289)
        plt.grid()
        plt.show()
        pdf.savefig(fig)
Example #3
def CorrMeasures(xplot,
                 Feats,
                 plotFeatures,
                 plotFeaturesSynch,
                 chSet=0,
                 Nmodels=1,
                 FeatsMulti=0):
    #correlation measures between features (Feats and FeatsMulti) and shifted parameter (xplot)
    MeasMI = [None] * len(
        stimAmp)  #mutual information [StimAmps][Realizations,features]
    MeasCorr = [None] * len(stimAmp)
    MeasSpCorr = [None] * len(stimAmp)

    for ui in range(len(stimAmp)):  #for each stimulus type
        MeasMI[ui], MeasSpCorr[ui], MeasCorr[ui] = {}, {}, {}
        for fkey in plotFeatures:  #for each univariate feature
            ysmooth = uniform_filter1d(Feats[ui][chSet][fkey],
                                       size=avrgWinSize,
                                       axis=0)  #smooth feature series
            MeasMI[ui][fkey] = mutual_info_regression(
                ysmooth, xplot[stimTS])  #mutual information
            MeasCorr[ui][fkey] = np.corrcoef(
                ysmooth.T, xplot[stimTS])[-1, :-1]  #correlation coefficient
            spTemp = spearmanr(
                ysmooth, xplot[stimTS])[0]  #spearman correlation coefficient
            if Nsims > 1:
                spTemp = spTemp[-1, :-1]
            MeasSpCorr[ui][fkey] = spTemp
        if Nmodels > 1:  #synchronization features
            for fkey in plotFeaturesSynch:
                ysmooth = uniform_filter1d(FeatsMulti[ui][fkey][:, :, 0],
                                           size=avrgWinSize,
                                           axis=0)
                MeasMI[ui][fkey] = mutual_info_regression(
                    ysmooth, xplot[stimTS - 1])
                MeasCorr[ui][fkey] = np.corrcoef(ysmooth.T,
                                                 xplot[stimTS])[-1, :-1]
                spTemp = spearmanr(ysmooth, xplot[stimTS])[0]
                if Nsims > 1:
                    spTemp = spTemp[-1, :-1]
                MeasSpCorr[ui][fkey] = spTemp

        MeasMI[ui]["StimAmp"] = stimAmp[ui] * np.ones(Nsims)
        MeasCorr[ui]["StimAmp"] = stimAmp[ui] * np.ones(Nsims)
        MeasSpCorr[ui]["StimAmp"] = stimAmp[ui] * np.ones(Nsims)

        MeasMI[ui]["channel"] = chSet
        MeasCorr[ui]["channel"] = chSet
        MeasSpCorr[ui]["channel"] = chSet

        #seizure onset time
        MeasMI[ui] = pd.DataFrame(MeasMI[ui])
        MeasCorr[ui] = pd.DataFrame(MeasCorr[ui])
        MeasSpCorr[ui] = pd.DataFrame(MeasSpCorr[ui])

    MeasMI_df = pd.concat(MeasMI, ignore_index=True)
    MeasSpCorr_df = pd.concat(MeasSpCorr, ignore_index=True)
    MeasCorr_df = pd.concat(MeasCorr, ignore_index=True)

    return MeasMI_df, MeasSpCorr_df, MeasCorr_df
Example #4
def boxcar(image, size):
    """Compute a rolling (boxcar) average of an image.

    The kernel is square or rectangular.

    Parameters
    ----------
    image : ndarray
    size : number or tuple
        Size of rolling average (square or rectangular kernel) filter. Should
        be odd and larger than the particle diameter.
        Provide a tuple for different sizes per dimension.

    Returns
    -------
    result : array
        the rolling average image

    See Also
    --------
    bandpass
    """
    size = validate_tuple(size, image.ndim)
    if not np.all([x & 1 for x in size]):
        raise ValueError("Smoothing size must be an odd integer. Round up.")
    result = image.copy()
    for axis, _size in enumerate(size):
        if _size > 1:
            uniform_filter1d(result, _size, axis, output=result,
                             mode='nearest', cval=0)
    return result
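A quick usage sketch (hypothetical array; assumes validate_tuple from the surrounding module is in scope):

import numpy as np

img = np.arange(25, dtype=float).reshape(5, 5)
smooth = boxcar(img, 3)  # 3x3 rolling average; the size must be odd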
Example #5
def main():

    # Import pickle resuts
    with open('plot_10000.pkl', 'rb') as f:
        training_reward = pickle.load(f)

    # Create the episode list
    episode = list(range(100, 10000, 10))

    # Create the average list for plotting
    avg_list = [np.mean(training_reward[(i - 100):i]) for i in episode]

    # Smooth the average list (size=1 here is a no-op; increase the size for real smoothing)
    avg_list_smoothed = uniform_filter1d(avg_list, 1)

    # Create the standard deviation list and smooth it the same way
    stdev_list = [np.std(training_reward[(i - 100):i]) for i in episode]
    stdev_list_smoothed = uniform_filter1d(stdev_list, 1)

    # Plot the episodic raw reward
    plt.plot(episode, training_reward[100:10000:10])
    tikz_save('raw_reward_plot.tikz')
    plt.close()

    # Plot the average reward and the standard deviation band
    plt.plot(episode, avg_list)
    plt.fill_between(episode,
                     avg_list_smoothed + stdev_list_smoothed,
                     avg_list_smoothed - stdev_list_smoothed,
                     alpha=0.3,
                     edgecolor="#ff7f0e")

    tikz_save('average_reward_plot.tikz')
    plt.close()
Example #6
def show_training_results(hits, wins, lifetime, rewards, time, heatmap,
                          heatmap_start):
    fig, ax = plt.subplots(3, 2, figsize=(12, 12))
    fig.tight_layout(pad=3.0)

    n = len(time) // 10
    hits = uniform_filter1d(hits, size=n)
    ax[0, 0].plot(time, hits, 'r')  # row=0, col=0
    ax[0, 0].set_title('Hits')

    ax[1, 0].plot(time, wins, 'b')  # row=1, col=0
    ax[1, 0].set_title('Win probability')

    lifetime = uniform_filter1d(lifetime, size=n)
    ax[0, 1].plot(time, lifetime, 'g')  # row=0, col=1
    ax[0, 1].set_title('Lifetime')

    rewards = uniform_filter1d(rewards, size=n)
    ax[1, 1].plot(time, rewards, 'k')  # row=1, col=1
    ax[1, 1].set_title('Rewards')

    ax[2, 0].imshow(heatmap_start, cmap='hot', interpolation='nearest')
    ax[2, 0].set_title('Initial HeatMap')

    ax[2, 1].imshow(heatmap, cmap='hot', interpolation='nearest')
    ax[2, 1].set_title('HeatMap')

    plt.show()
Example #7
def boxcar(image, size):
    """Compute a rolling (boxcar) average of an image.

    The kernel is square or rectangular.

    Parameters
    ----------
    image : ndarray
    size : number or tuple
        Size of rolling average (square or rectangular kernel) filter. Should
        be larger than the particle diameter.
        Provide a tuple for different sizes per dimension.

    Returns
    -------
    result : array
        the rolling average image

    See Also
    --------
    bandpass
    """
    size = validate_tuple(size, image.ndim)
    result = image.copy()
    for axis, _size in enumerate(size):
        if _size > 1:
            uniform_filter1d(result,
                             _size,
                             axis,
                             output=result,
                             mode='nearest',
                             cval=0)
    return result
Example #8
def window_stdev(arr, radius):
    c1 = uniform_filter1d(arr,
                          radius * 2,
                          axis=0,
                          mode='constant',
                          origin=-radius)
    c2 = uniform_filter1d(arr * arr,
                          radius * 2,
                          axis=0,
                          mode='constant',
                          origin=-radius)
    return ((c2 - c1 * c1)**.5)[:-radius * 2 + 1, :-radius * 2 + 1]
Example #9
def window_stdev(data, radius):
    '''Explanation on https://stackoverflow.com/questions/18419871/improving-code-efficiency-standard-deviation-on-sliding-windows'''
    # windowed mean
    c1 = uniform_filter1d(data.astype(np.float32),
                          radius * 2,
                          mode='constant',
                          origin=-radius)
    # windowed mean of squares
    c2 = uniform_filter1d(data.astype(np.float32) *
                          data.astype(np.float32),
                          radius * 2,
                          mode='constant',
                          origin=-radius)
    # std = sqrt(E[x^2] - E[x]^2); drop trailing outputs whose window ran past the data
    return ((c2 - c1 * c1)**.5)[:-radius * 2 + 1]
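A small self-check of the identity std = sqrt(E[x^2] - E[x]^2) against explicitly materialized windows (synthetic data; the loose tolerance accounts for float32 round-off):

import numpy as np
from scipy.ndimage import uniform_filter1d

rng = np.random.default_rng(1)
data = rng.normal(size=100)
radius = 4
fast = window_stdev(data, radius)
slow = np.array([data[i:i + 2 * radius].std()
                 for i in range(len(data) - 2 * radius + 1)])
assert np.allclose(fast, slow, atol=1e-3)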
Example #10
def bandpass(image, lshort, llong, threshold=None, truncate=4):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation relies on scipy.ndimage.filters.gaussian_filter, and it
    is the fastest way known to the authors of performing a bandpass in
    Python.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    legacy_bandpass, legacy_bandpass_fftw
    """
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    boxcar = image.copy()
    result = np.array(image, dtype=float)
    for axis, (sigma, smoothing) in enumerate(zip(lshort, llong)):
        if smoothing > 1:
            uniform_filter1d(boxcar, 2*smoothing+1, axis, output=boxcar,
                             mode='nearest', cval=0)
        if sigma > 0:
            correlate1d(result, gaussian_kernel(sigma, truncate), axis,
                        output=result, mode='constant', cval=0.0)
    result -= boxcar
    return np.where(result > threshold, result, 0)
Example #11
    def to_label(self,
                 smoothing=0.250,
                 percentile=75,
                 plot=True,
                 figsize=(13, 5)):
        """Generates binary label (movement vs. rest) from accelererometer data by
            1. Uniformally smoothing acc data with `smoothing` seconds
            2. Thresholding according to `percentile`

        Args:
            smoothing (float, optional): Width of smoothing window in seconds. Defaults to 0.250.
            percentile (int, optional): Percentile of (smoothed signal) used to compute threshold. Defaults to 75.
            plot (bool, optional): Flag indicating whether to generate plot. Defaults to True.
            figsize (tuple, optional): figsize, only used when `plot = True`. Defaults to (13, 5).

        Raises:
           RuntimeError: Accelerometer labels have to be computed from single-channel signal. Please call .to_magnitude() method to get single accelerometer channel.

        Returns:
            AccData: Object of class AccData.
        """

        if not self.highpassed:
            _ = self.highpass()

        if len(self.data.shape) > 1:
            raise RuntimeError(
                "Accelerometer labels have to be computed from single-channel signal. Please call .to_magnitude() method to get single accelerometer channel."
            )

        smoothed = uniform_filter1d(self.data, size=int(smoothing * self.fs))

        thresh = np.percentile(smoothed, percentile)
        self.label = (np.abs(smoothed) > thresh).astype(float)

        label_smoothed = uniform_filter1d(self.label, size=int(self.fs))
        # assert np.allclose(self.label, label_smoothed)

        self.label = (label_smoothed > 0.5).astype(float)

        if plot:
            fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=figsize)

            ax1.plot(self.time(), smoothed)
            ax1.plot(self.time(), self.label * thresh * 5)
            ax1.set_title("Smoothed + label")

            ax2.plot(self.time(), self.data)
            ax2.set_title("Raw data")
        return self
Example #12
def boxcar(image, size):
    size = validate_tuple(size, image.ndim)  # expand size to one value per dimension
    if not np.all([x & 1 for x in size]):  # each size must be an odd integer
        raise ValueError("Smoothing size must be an odd integer. Round up.")
    result = image.copy()  # work on a copy of the input image
    for axis, _size in enumerate(size):  # boxcar-average along each axis in turn
        if _size > 1:
            uniform_filter1d(result,
                             _size,
                             axis,
                             output=result,
                             mode='nearest',
                             cval=0)
    return result
Example #13
def poc_gradient_zero_crossing(force, ret_details=False):
    """Gradient zero-crossing of indentation part

    1. Apply a moving average filter to the curve
    2. Compute the gradient
    3. Cut off gradient at maximum with a 10 point reserve
    4. Apply a moving average filter to the gradient
    5. The POC is the index of the averaged gradient curve where
       the values are below 1% of the gradient maximum, measured
       from the indentation maximum (not from baseline).
    """
    cp = np.nan
    details = {}
    # Apply a moving average filter to smooth the array
    filtsize = max(5, int(force.size * .01))
    y = uniform_filter1d(force, size=filtsize)
    if y.size > 1:
        # Cutoff at maximum plus some reserve
        cutoff = y.size - np.argmax(y) + 10
        grad = np.gradient(y)[:-cutoff]
        if grad.size > 50:
            # Use the point where the gradient becomes small enough.
            gradn = uniform_filter1d(grad, size=filtsize)
            thresh = 0.01 * np.max(gradn)
            gradpos = gradn <= thresh
            if np.sum(gradpos):
                # The gradient contains positive values.
                # Flip `gradpos`, because we want the first value from the
                # end of the array.
                # Weight with two times "filtsize//2", because we actually
                # want the rolling average filter from the edge and not at the
                # center of the array (and two times, because we did two
                # filter operations).
                cp = y.size - np.where(gradpos[::-1])[0][0] - cutoff + filtsize

                if ret_details:
                    x = np.arange(gradn.size)
                    details["plot force gradient"] = [x, gradn]
                    details["plot threshold"] = [[x[0], x[-1]],
                                                 [thresh, thresh]]
                    details["plot poc"] = [[cp, cp],
                                           [gradn.min(),
                                            gradn.max()]]

    if ret_details:
        return cp, details
    else:
        return cp
Example #14
    def plot_all_train_dice(self):
        self.log_dir = 'results/' + task_name + '/train/pred/GLUCOLD/' + number + '/'
        self.file_name = os.path.join(self.log_dir, file_name)

        fig = plt.figure(facecolor='w', figsize=(6, 6), dpi=300)
        ax = fig.add_subplot(111)

        for i in range(1, self.out_chn):
            y_name = 'ave_dice_class_' + str(i)

            x, y = self._get_data(y_name)
            y = uniform_filter1d(y, size=self.average_N)
            ax.plot(x, y, label='train_dice_' + str(i))
        ax.legend()
        ax.set_ylim((0, 1))

        ax.set_xlim(left=0)
        ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 1])
        ax.set_ylabel('dice')
        ax.set_xlabel('epochs')
        # plt.xlim(left=0)
        # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.1))
        ax.plot([0, 90000], [0.95, 0.95], 'k-', lw=1, dashes=[2, 2])
        #         ax.plot([0,90000], [0.97, 0.97], 'k-', lw=1, dashes=[2,2])

        plt.rc('font', size=18)
        frame = plt.gca()
        frame.axes.get_yaxis().set_visible(True)
        frame.axes.get_xaxis().set_visible(True)
        # plt.show()
        plt.savefig('all_train_dice.png')
        plt.close()
Example #15
    def plot_all_train_dice(self):
        fig = plt.figure(facecolor='w', figsize=(6, 6), dpi=300)
        ax = fig.add_subplot(111)
        for i in range(1, self.out_chn):
            if self.new_arch:
                y_name = self.task_name + '_out_segmentation_dice_' + str(i)
            else:
                y_name = 'out_' + self.task_name + '_segmentation_dice_' + str(
                    i)

            x, y = self._get_data(y_name)
            x, y = x[::skip_number], y[::skip_number]
            y = uniform_filter1d(y, size=self.average_N)
            ax.plot(x, y, label='train_dice_' + str(i))
        ax.legend()
        ax.set_ylim((0, 1))

        ax.set_xlim(left=0)
        ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 1])
        ax.set_ylabel('dice')
        ax.set_xlabel('steps')
        # plt.xlim(left=0)
        # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.1))
        ax.plot([0, 90000], [0.95, 0.95], 'k-', lw=1, dashes=[2, 2])
        #         ax.plot([0,90000], [0.97, 0.97], 'k-', lw=1, dashes=[2,2])

        plt.rc('font', size=18)
        # plt.show()
        frame = plt.gca()
        frame.axes.get_yaxis().set_visible(True)
        frame.axes.get_xaxis().set_visible(True)
        plt.savefig('all_train_dice.png')
        plt.close()
Example #16
def sp_mvg_avg(v, N, edges='nearest'):
    """
    Use scipy's uniform_filter1d to calculate a moving average, see the docs at
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.uniform_filter1d.html
    Handles NaNs by removing them before filtering; NaN positions remain NaN in the output.

    Parameters
    ----------
    v : np.ndarray
        data to average.
    N : int
        number of samples per average.
    edges : str, optional
        mode of uniform_filter1d (see docs). The default is 'nearest'.

    Returns
    -------
    avg : np.ndarray
        averaged data.
    """
    m = np.isfinite(v)
    avg = np.empty(v.shape)
    avg[~m] = np.nan
    avg[m] = uniform_filter1d(v[m], size=N, mode=edges)
    return avg
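A short usage sketch with hypothetical values, showing that NaN positions are preserved while the finite samples are averaged over their nearest finite neighbours:

import numpy as np

v = np.array([1.0, 2.0, np.nan, 4.0, 5.0, np.nan, 7.0])
print(sp_mvg_avg(v, N=3))  # NaNs stay NaN; finite entries are 3-point averages of the finite subsequence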
Example #17
def calc_est_fit(estimates, conv_n, tau):
    """Make estimate by fitting exponential convergence to estimates.
    """
    n = len(estimates)

    if n < conv_n:
        return nan, inf

    # iteration number, fit function to inverse this to get k->infinity
    ks = np.arange(1, len(estimates) + 1)

    # smooth data with a running mean
    smoothed_estimates = uniform_filter1d(estimates, n // 2)

    # ignore this amount of the initial estimates and fit later part only
    ni = n // 2

    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # fit the inverse data with a line, weighting recent ests more
            popt, pcov = np.polyfit(x=(1 / ks[ni:]),
                                    y=smoothed_estimates[ni:],
                                    w=ks[ni:],
                                    deg=1,
                                    cov=True)

        # estimate of function at 1 / k = 0 and standard error
        est, err = popt[-1], abs(pcov[-1, -1])**0.5

    except (ValueError, RuntimeError):
        est, err = nan, inf

    return est, err
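A quick sanity check on a hypothetical series converging like 2 + 1/k (assumes the module-level imports used above: numpy, warnings, uniform_filter1d, and nan/inf):

import numpy as np

ests = 2 + 1 / np.arange(1, 201)
est, err = calc_est_fit(ests, conv_n=10, tau=None)
print(est)  # approximately 2.0, the extrapolated value at 1/k -> 0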
Example #18
    def plot_all_val_dice(self):
        fig = plt.figure(facecolor='w', figsize=(6, 6), dpi=300)
        ax = fig.add_subplot(111)

        for i in range(1, self.out_chn):
            y_name = 'val_' + self.task_name + '_out_segmentation_dice_' + str(
                i)

            x, y = self._get_va_data(y_name)
            y = uniform_filter1d(y, size=self.average_N)
            ax.plot(x, y, label='val_dice_' + str(i))
        ax.legend()
        ax.set_ylim((0, 1))

        ax.set_xlim(left=0)
        ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 1])
        ax.set_ylabel('dice')
        ax.set_xlabel('steps')
        # plt.xlim(left=0)
        # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.1))
        ax.plot([0, x[-1]], [0.95, 0.95], 'k-', lw=1, dashes=[2, 2])
        #         ax.plot([0,90000], [0.97, 0.97], 'k-', lw=1, dashes=[2,2])

        plt.rc('font', size=18)
        frame = plt.gca()
        frame.axes.get_yaxis().set_visible(True)
        frame.axes.get_xaxis().set_visible(True)
        # plt.show()
        plt.savefig(self.va_log.split('.log')[0] + 'all_valid_dice.png')
        print('save fig at',
              self.va_log.split('.log')[0] + 'all_valid_dice.png')

        plt.close()
Example #19
def parse_labels(signal_files, fs=SAMPLING_FREQ, cutoff=CUTOFF_FREQ):

    cycle_labels = []
    for signal_file in tqdm(signal_files):
    
        df = pd.read_csv(signal_file, header=None)
        if len(df.columns) == 1:
            df = pd.read_csv(signal_file, sep=';', header=None)

        hsig = df.iloc[:, -2].values
        vsig = df.iloc[:, -1].values
        
        L = []
        for signal in [hsig, vsig]:
            X = fft(signal)
            freqs = fftfreq(len(signal)) * fs
            
            X = np.abs(X[1:len(signal)//2])
            freqs = freqs[1:len(signal)//2]
            
            # moving average filter
            Xf = uniform_filter1d(X, size=5)
            
            L += [np.mean(Xf[freqs <= cutoff])]
        cycle_labels.append(np.max(L))
    cycle_labels = (np.array(cycle_labels) - np.min(cycle_labels)) / (np.max(cycle_labels) - np.min(cycle_labels))
    return cycle_labels
Example #20
    def get_chan_imp_resp(self, sch_burst):
        sch_burst_bl = resize(array(sch_burst), (int(len(sch_burst)/self.OSR),self.OSR))
        correlation_bl = zeros(shape(sch_burst_bl), dtype=np.complex64)
        for ii in range(0, self.OSR):
            correlation_bl[:, ii] = correlate(sch_burst_bl[:, ii], self.sync_seq_msk, 'same')
        
        correlation_bl = correlation_bl/len(self.sync_seq_msk)
        power_bl_mov_avg = uniform_filter1d(abs(correlation_bl)**2,self.L+1,mode='constant',axis=0)

        print "correlation_bl.argmax()",argmax(abs(correlation_bl))
        print "power_bl_mov_avg.argmax()",(power_bl_mov_avg).argmax()
        print 'unravel_index(correlation_bl.argmax(), correlation_bl.shape)',unravel_index(argmax(abs(correlation_bl)), correlation_bl.shape)
        print 'unravel_index(power_bl_mov_avg.argmax(), power_bl_mov_avg.shape)',unravel_index(power_bl_mov_avg.argmax(), power_bl_mov_avg.shape)
        (r_corrmax, c_corrmax)=unravel_index(argmax(abs(correlation_bl)), correlation_bl.shape)
        (r_powmax, c_powmax)=unravel_index(power_bl_mov_avg.argmax(), power_bl_mov_avg.shape)
        
#        correlation = zeros(shape(sch_burst))
#        correlation = correlate(sch_burst, self.sync_seq_msk_interp,'same')/len(self.sync_seq_msk)
#        print "pozycja maksimum",argmax(abs(correlation))
#        plot(abs(hstack(correlation_bl))*1000)
##        hold(True)
##        plot(abs(sch_burst)*500)
##        print shape(range(0,len(sch_burst),self.OSR))
##        print shape(correlation_bl[:,0])
#        for ii in range(0,self.OSR):
#            if ii == c_powmax:
#                plot(range(ii,len(correlation_bl[:,0])*self.OSR,self.OSR),power_bl_mov_avg[:,ii]*5e6,'g.')
#            else:
#                plot(range(ii,len(correlation_bl[:,0])*self.OSR,self.OSR),power_bl_mov_avg[:,ii]*5e6,'r.')
#        show()
#        figure()
        print('r_powmax: ', r_powmax)
Example #21
def running_average_special(year_df, running=240):
    """
    Method: running_average_special(data)
    Purpose: Applies the 10 running day filter to the data
    Require:
        data: The mdata dictionary
    Version: 05/2021, MJB: Documentation
    """
    i = 1
    longest = 0
    indi = len(year_df)
    depths = list(year_df.columns)
    arr = []
    for depth in depths:
        arr.append(uniform_filter1d(year_df[str(depth)].dropna(), size=running))
    dfdelta = pd.DataFrame(np.column_stack(arr), columns=depths, index=year_df.dropna().index)
    '''
    for depth in depths:
        # Cambiado entre otros index del data al dropna
        series1 = pd.DataFrame(uniform_filter1d(year_df[str(depth)].dropna(), size=running),
                               index=year_df[str(depth)].dropna().index, columns=[str(depth)]).reindex(year_df.index)
        i += 1
        if 'dfdelta' in locals():
            dfdelta = pd.merge(dfdelta, series1, right_index=True, left_index=True)
        else:
            dfdelta = pd.DataFrame(series1)
    '''

    return dfdelta
Example #22
def running_average(data, running=240):
    """
    Method: running_average(data, running=240)
    Purpose: Applies the 10 running day filter to the data
    Require:
        data: The mdata dictionary
    Version: 05/2021, MJB: Documentation
    """
    df, depths, _ = list_to_df(data)
    i = 1
    longest = 0
    indi = 0  # Checks the longest time series of all to use it as the base for the plots
    for u in range(0, len(data)):
        if len(data[u]['time']) > longest:
            longest = len(data[u]['time'])
            indi = u
    for depth in depths:
        # Cambiado entre otros index del data al dropna
        series1 = pd.DataFrame(uniform_filter1d(df[str(depth)].dropna(), size=running),
                               index=df[str(depth)].dropna().index, columns=[str(depth)]).reindex(data[indi]['time'])
        i += 1
        if 'dfdelta' in locals():
            dfdelta = pd.merge(dfdelta, series1, right_index=True, left_index=True)
        else:
            dfdelta = pd.DataFrame(series1)

    return dfdelta
Example #23
def types_temp_draw4(all_recs):
    all_recs = types_draw  # note: the argument is overridden by the module-level types_draw
    # maxi=[]
    # for rec in all_recs:
    #     maxi.append(max(rec[1]))
    # print 'Max value of y-axis is %d' %(max(maxi))

    # x_ticks=['LTE_ATTACH','LTE_DETACH','CSFB','Service_Req','TAU','Path_Switch','LTE_PAGING']
    x_ticks=['TAU','CSFB','Detach','pSwitch']

    with PdfPages('types_temp4.pdf') as pdf:
        font_size='30'
        fig = plt.figure()
        ax=fig.add_subplot(111)
        ax.plot(np.arange(0,288), gaussian_filter1d(all_recs[0],5),'r-',marker='D',markersize=8, markevery=10,linewidth=2)
        ax.plot(np.arange(0,288), gaussian_filter1d(all_recs[1],5),'b--',marker='o',markersize=8, markevery=10,linewidth=2)
        ax.plot(np.arange(0,288), gaussian_filter1d(all_recs[2],5),'g:',  marker='s',markersize=8, markevery=10,linewidth=2)
        ax.plot(np.arange(0,288), uniform_filter1d(all_recs[3],2),'-.',color='#666666',marker='^',markersize=8, markevery=10,linewidth=2)

        plt.subplots_adjust(top=0.73, right=0.9, left=0.21, bottom=0.21)
        plt.xticks(np.arange(0,289,step=48),[0,4,8,12,16,20,24],size=font_size,fontweight='bold')
        # plt.yticks(size=font_size,fontweight='bold')
        plt.yticks(np.arange(0,35001,step=5000),[0,5,10,15,20,25,30,35],size=font_size,fontweight='bold')

        plt.yticks(size=font_size,fontweight='bold')
        plt.xlabel('Time of Day (h)', fontsize=font_size,fontweight='bold')
        plt.ylabel('# of Records (k)',fontsize=font_size,fontweight='bold')
        # plt.ylim(0,100)
        plt.legend(x_ticks,   fontsize=30, loc="upper center", ncol=2,
               bbox_to_anchor=(0.5, 1.6) ,columnspacing=0.01, handletextpad=0.1)
        plt.xlim(0,289)
        plt.grid()
        plt.show()
        pdf.savefig(fig)
Example #24
def apply_uniform_filter(data):
    """
    Method: apply_uniform_filter(data)
    Purpose: Applies the 10 running day filter to the data
    Require:
        data: The mdata dictionary
    Version: 05/2021, MJB: Documentation
    """
    df, depths = temp_difference(data)
    i = 1
    longest = 0
    indi = 0  # Checks the longest time series of all to use it as the base for the plots
    for u in range(0, len(data)):
        if len(data[u]['time']) > longest:
            longest = len(data[u]['time'])
            indi = u
    for depth in depths[:-1]:
        series1 = pd.DataFrame(uniform_filter1d(df[str(depth) + "-" + str(depths[i])].dropna(), size=240),
                               index=df[str(depth) + "-" + str(depths[i])].dropna().index, columns=[str(depth) + "-" + str(depths[i])]).reindex(data[indi]['time'])
        #series1 = pd.DataFrame(uniform_filter1d(df[str(depth) + "-" + str(depths[i])], size=240),
        #                       index=data[indi]['time'], columns=[str(depth) + "-" + str(depths[i])])
        i += 1
        if 'dfdelta' in locals():
            dfdelta = pd.merge(dfdelta, series1, right_index=True, left_index=True)
        else:
            dfdelta = pd.DataFrame(series1)

    return dfdelta
Example #25
    def detect(self, threshold, combine=30, pre_avg=100, pre_max=30,
               post_avg=30, post_max=70, delay=0):
        """
        Detects the onsets.

        :param threshold: threshold for peak-picking
        :param combine:   only report 1 onset within N milliseconds
        :param pre_avg:   use N milliseconds of past information for the moving average
        :param pre_max:   use N milliseconds of past information for the moving maximum
        :param post_avg:  use N milliseconds of future information for the moving average
        :param post_max:  use N milliseconds of future information for the moving maximum
        :param delay:     report the onset delayed by N milliseconds

        In online mode, post_avg and post_max are set to 0.

        Implements the peak-picking method described in:

        "Evaluating the Online Capabilities of Onset Detection Methods"
        Sebastian Böck, Florian Krebs and Markus Schedl
        Proceedings of the 13th International Society for Music Information
        Retrieval Conference (ISMIR), 2012

        """
        # online mode?
        if self.online:
            post_max = 0
            post_avg = 0
        # convert timing information to frames
        pre_avg = int(round(self.fps * pre_avg / 1000.))
        pre_max = int(round(self.fps * pre_max / 1000.))
        post_max = int(round(self.fps * post_max / 1000.))
        post_avg = int(round(self.fps * post_avg / 1000.))
        # convert to seconds
        combine /= 1000.
        delay /= 1000.
        # init detections
        self.detections = []
        # moving maximum
        max_length = pre_max + post_max + 1
        max_origin = int(np.floor((pre_max - post_max) / 2))
        mov_max = maximum_filter1d(self.activations, max_length,
                                   mode='constant', origin=max_origin)
        # moving average
        avg_length = pre_avg + post_avg + 1
        avg_origin = int(np.floor((pre_avg - post_avg) / 2))
        mov_avg = uniform_filter1d(self.activations, avg_length,
                                   mode='constant', origin=avg_origin)
        # detections are activation equal to the maximum
        detections = self.activations * (self.activations == mov_max)
        # detections must be greater or equal than the mov. average + threshold
        detections = detections * (detections >= mov_avg + threshold)
        # convert detected onsets to a list of timestamps
        last_onset = 0
        for i in np.nonzero(detections)[0]:
            onset = float(i) / float(self.fps) + delay
            # only report an onset if none was reported within the last N milliseconds
            if onset > last_onset + combine:
                self.detections.append(onset)
                # save last reported onset
                last_onset = onset
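The core of the peak-picking scheme can be sketched standalone (synthetic activation curve, hypothetical window sizes and threshold):

import numpy as np
from scipy.ndimage import maximum_filter1d, uniform_filter1d

activations = np.array([0., .1, .9, .2, .1, .1, .8, .1, 0., 0.])
mov_max = maximum_filter1d(activations, size=3, mode='constant')
mov_avg = uniform_filter1d(activations, size=3, mode='constant')
# onsets: local maxima that also clear the moving average by the threshold
peaks = np.nonzero((activations == mov_max) & (activations >= mov_avg + 0.3))[0]
print(peaks)  # -> [2 6]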
Example #26
    def filter(self, average_size):
        (times, values) = zip(*self.values)
        #times = [t / 1000 for t in range(0, average_size // 2)] + [t + average_size // 2 / 1000 for t in times]
        #values = [values[0] for v in range(0, average_size // 2)] + list(values)
        filtered_data = uniform_filter1d(values, size=average_size, mode='nearest')
        newdata = list(zip(times, filtered_data))
        return Signal(newdata)
Example #27
def rolling_mean(data, windowsize, sample_rate):
    '''calculates rolling mean

    Function to calculate the rolling mean (also: moving average) over the passed data.

    Parameters
    ----------
    data : 1-dimensional numpy array or list
        sequence containing data over which rolling mean is to be computed

    windowsize : int or float
        the window size to use, in seconds
        calculated as windowsize * sample_rate

    sample_rate : int or float
        the sample rate of the data set

    Returns
    -------
    out : 1-d numpy array
        sequence containing computed rolling mean

    Examples
    --------
    >>> data, _ = load_exampledata(example = 1)
    >>> rmean = rolling_mean(data, windowsize=0.75, sample_rate=100)
    >>> rmean[100:110]
    array([514.49333333, 514.49333333, 514.49333333, 514.46666667,
           514.45333333, 514.45333333, 514.45333333, 514.45333333,
           514.48      , 514.52      ])
    '''

    rol_mean = uniform_filter1d(np.asarray(data, dtype='float'),
                                size=int(windowsize * sample_rate))
    return rol_mean
Example #28
def get_smoothed_running_minimum(timeseries, tau1=30, tau2=100):
    result = minimum_filter1d(uniform_filter1d(timeseries,
                                               tau1,
                                               mode='nearest'),
                              tau2,
                              mode='reflect')
    return result
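For intuition, a brief sketch on a hypothetical noisy trace: the uniform filter suppresses high-frequency noise, and the running minimum then tracks the lower envelope:

import numpy as np

rng = np.random.default_rng(0)
trace = np.sin(np.linspace(0, 10, 500)) + 5 + rng.normal(0, 0.1, 500)
baseline = get_smoothed_running_minimum(trace, tau1=30, tau2=100)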
Example #29
def legacy_bandpass_fftw(image, lshort, llong, threshold=None):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation performs a Fourier transform using FFTW
    (Fastest Fourier Transform in the West). Without FFTW and pyfftw, it
    will raise an ImportError

    In benchmarks using typical inputs, it was found to be slower than the
    ``bandpass`` function in this module.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    bandpass, legacy_bandpass
    """
    if not FFTW_AVAILABLE:
        raise ImportError("This implementation requires pyfftw.")
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x * 2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1 / 256.
    # Perform a rolling average (boxcar) with kernel size = 2*llong + 1
    boxcar = np.asarray(image)
    for (axis, size) in enumerate(llong):
        boxcar = uniform_filter1d(boxcar,
                                  size * 2 + 1,
                                  axis,
                                  mode='nearest',
                                  cval=0)
    # Perform a gaussian filter
    gaussian = ifftn(fourier_gaussian(fftn(image), lshort)).real

    result = gaussian - boxcar
    return np.where(result > threshold, result, 0)
Example #30
def extract_lines(pc, window_size=3, dot_min=0.95, **kwargs):
    """Extract a first approximation of all lines in 2D line string

    Args:
        pc (ndarray): 2D Line String
        window_size (int, optional): Smoothing window for Line. Defaults to 3.
        dot_min (float, optional): Minimum dot product for joining lines. Defaults to 0.95.

    Returns:
        List: List of lines(dict)
    """

    t1 = time.perf_counter()
    pc_shift = np.roll(pc, -1, axis=0)
    diff = pc_shift - pc
    diff_vec, length = normalized(diff)
    idx_max = np.argmax(length)
    assert idx_max != 0 and idx_max != length.shape[0] - \
        1, "LineString is not continuously connected"
    t2 = time.perf_counter()
    x = uniform_filter1d(diff[:, 0], size=window_size)
    y = uniform_filter1d(diff[:, 1], size=window_size)
    diff_smooth = np.column_stack((x, y))
    t3 = time.perf_counter()

    diff_smooth, length = normalized(diff_smooth)
    diff_smooth_shift = np.roll(diff_smooth, -1, axis=0)
    acos = np.einsum('ij, ij->i', diff_smooth, diff_smooth_shift)
    t4 = time.perf_counter()

    mask = acos > dot_min
    np_diff = np.diff(np.hstack(([False], mask, [False])))
    idx_pairs = np.where(np_diff)[0].reshape(-1, 2)

    t5 = time.perf_counter()
    logging.debug("IDX Pairs %s", (idx_pairs))
    fit_lines = [fit_line(pc, idx) for idx in idx_pairs if idx[1] - idx[0] > 1]
    t6 = time.perf_counter()

    # ms1 = (t2-t1) * 1000
    # ms2 = (t3-t2) * 1000
    # ms3 = (t4-t3) * 1000
    # ms4 = (t5-t4) * 1000
    # ms5 = (t6-t5) * 1000
    # print(ms1, ms2, ms3, ms4, ms5)
    return fit_lines
Example #31
def detrend_ma(data, window):

    from scipy.ndimage.filters import uniform_filter1d

    for i in range(data.shape[1]):
        data[:, i] = data[:, i] - uniform_filter1d(data[:, i], size=window)

    return data
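A short usage sketch on synthetic random-walk columns; note that the function modifies data in place as well as returning it, hence the copy:

import numpy as np

x = np.cumsum(np.random.default_rng(0).normal(size=(200, 2)), axis=0)
detrended = detrend_ma(x.copy(), window=21)  # subtracts a 21-sample moving average per column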
Example #32
def window_stdev(arr, radius):
    from scipy.ndimage.filters import uniform_filter1d

    #array = arr, radius = half width of window in bins
    #windows mean
    c1 = uniform_filter1d(arr,
                          2 * radius,
                          mode='constant',
                          origin=-radius,
                          axis=0)
    #windowed square mean
    c2 = uniform_filter1d(arr * arr,
                          2 * radius,
                          mode='constant',
                          origin=-radius,
                          axis=0)
    # std = sqrt(windowed mean of squares - squared windowed mean)
    return ((c2 - c1 * c1)**.5)[:-2 * radius + 1]
Example #33
    def _running_mean(self):
        """
        From https://stackoverflow.com/a/43200476/13186064
        """
        # origin shifts the window so each output is the mean of the next
        # window_size samples; the trailing window_size - 1 outputs ran past
        # the end of the data and are dropped
        return uniform_filter1d(
            self._reward_array,
            self.window_size,
            mode='constant',
            origin=-(self.window_size // 2))[:-(self.window_size - 1)]
Example #34
def running_min(X, tau1, tau2):
    ###DEBUGGING IMPLEMENTATION###
    # return minimum_filter1d(X,tau2,mode = 'nearest')
    ###PREVIOUS IMPLEMENTATION###
    mode = 'nearest'
    result = minimum_filter1d(uniform_filter1d(X, tau1, mode=mode),
                              tau2,
                              mode='reflect')
    return result
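The origin trick in _running_mean is equivalent to a 'valid'-mode moving average; a standalone check:

import numpy as np
from scipy.ndimage import uniform_filter1d

x = np.arange(8, dtype=float)
w = 3
fast = uniform_filter1d(x, w, mode='constant', origin=-(w // 2))[:-(w - 1)]
slow = np.convolve(x, np.ones(w) / w, mode='valid')
assert np.allclose(fast, slow)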
Example #35
def legacy_bandpass_fftw(image, lshort, llong, threshold=None):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation performs a Fourier transform using FFTW
    (Fastest Fourier Transform in the West). Without FFTW and pyfftw, it
    will raise an ImportError

    In benchmarks using typical inputs, it was found to be slower than the
    ``bandpass`` function in this module.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    bandpass, legacy_bandpass
    """
    if not FFTW_AVAILABLE:
        raise ImportError("This implementation requires pyfftw.")
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    # Perform a rolling average (boxcar) with kernel size = 2*llong + 1
    boxcar = np.asarray(image)
    for (axis, size) in enumerate(llong):
        boxcar = uniform_filter1d(boxcar, size*2+1, axis, mode='nearest',
                                  cval=0)
    # Perform a gaussian filter
    gaussian = ifftn(fourier_gaussian(fftn(image), lshort)).real

    result = gaussian - boxcar
    return np.where(result > threshold, result, 0)
Example #36
def applyFunction(data, f):
    if re.match("^log$", f):
        return np.log(data)
    elif re.match("^log10$", f):
        return np.log10(data)
    elif re.match("^log2$", f):
        return np.log2(data)
    elif re.match("^exp$", f):
        return np.exp(data)
    else:
        m = re.match("^([<>=!]+)([-.eE0-9]+)$", f)
        if m:
            if m.group(1) == "<=":
                return (data <= float(m.group(2))).astype(int)
            elif m.group(1) == ">=":
                return (data >= float(m.group(2))).astype(int)
            elif m.group(1) == "<":
                return (data < float(m.group(2))).astype(int)
            elif m.group(1) == ">":
                return (data > float(m.group(2))).astype(int)
            elif m.group(1) == "!=":
                return (data != float(m.group(2))).astype(int)
            elif m.group(1) == "==":
                return (data == float(m.group(2))).astype(int)
        m = re.match(r"^([\^\*\+\-\/])([-.eE0-9]+)$", f)
        if m:
            if m.group(1) == "^":
                return data**float(m.group(2))
            elif m.group(1) == "*":
                return data*float(m.group(2))
            elif m.group(1) == "+":
                return data+float(m.group(2))
            elif m.group(1) == "/":
                return data/float(m.group(2))
            elif m.group(1) == "-":
                return data-float(m.group(2))
        m = re.match("^default([-.eE0-9]*)$", f)
        if m:
            invalid = np.isnan(data)
            data2 = data.copy()  # copy so the caller's array is not mutated
            data2[invalid] = float(m.group(1))
            return data2
        m = re.match("^smoothG([-.eE0-9]*)$", f)
        if m:
            return gaussian_filter(data, float(m.group(1)), mode='reflect', truncate=4.0)
        m = re.match("^smoothU([-.eE0-9]*)$", f)
        if m:
            return uniform_filter1d(data, int(m.group(1)), mode='reflect')
        m = re.match("^smoothZ([-.eE0-9]*)$", f)
        if m:
            return z_scoreize_online(data, int(m.group(1)))
        m = re.match("^pow([-.eE0-9]*)$", f)
        if m:
            return np.power(float(m.group(1)), data)
        raise Exception("Unknown function provided: %s" % f)
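A few illustrative calls with hypothetical data (assumes the module imports numpy as np, re, and the scipy filters used above):

import numpy as np

data = np.array([1.0, 2.0, 4.0, 9.0])
print(applyFunction(data, "log2"))      # elementwise log2
print(applyFunction(data, ">=4"))       # 0/1 mask: [0 0 1 1]
print(applyFunction(data, "smoothU3"))  # 3-point moving average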
Example #37
    def compute_mask(self, frac=0.5, filtwidth=5):
        """
        return a mask showing where noise spikes to frac over the local
        background
        """
        smoothed_noise = gaussian_filter1d(self.error, filtwidth)
        mask = ((self.error >= (1 + frac) * smoothed_noise)
                | (self.error <= 0)
                | (self.error >= self.large_err)
                | (self.spectrum == 0))
        mask_filtered = uniform_filter1d(mask.astype(float),
                                         max(3, filtwidth))
        return mask_filtered > 0.5 / filtwidth
Example #38
def local_average(f,n,size=None,origin=None,mode='reflect'):
    """
    get the local, windowed function of the average, +/- n

    Args:
        f: what we want the stdev of
        n: window size (in either direction)
        mode: see uniform_filter1d
    Returns:
        array, same size as f, with the dat we want
    """
    if (size is None):
        size = 2*n
    if (origin is None):
        origin = 0
    return uniform_filter1d(f, size=size, mode=mode, origin=origin)
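A tiny usage sketch with hypothetical values; with the default size, each sample is replaced by the mean of a 2*n-sample window:

import numpy as np

f = np.arange(10, dtype=float)
print(local_average(f, n=2))  # means over 4-sample windows, edges handled by 'reflect'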
Example #39
def Du(x, y, Np, ndiv=1, axis=-1, mode='strip', cval=0.):
    """
    Does the central derrivative after performing a uniform
    filter (average of Np points).
    Date needs to be equally spaced and Np odd.
    """
    strip = False
    if mode == 'strip':
        strip = True
        mode = 'reflect'
    y = filters.uniform_filter1d(y, Np, axis=axis, mode=mode, cval=cval)
    #if Np%2 == 0: x -= (x[1]-x[0])/2 # or x = filters.uniform_filter1d(x, Np, axis=axis, mode='nearest')
    if strip:
        x, y = _do_strip(x, y, Np, axis=axis)
    Np = ndiv*2+1
    x, D = Dn(x, y, Np, ndiv=ndiv, axis=axis, mode=mode, cval=cval)
    if strip:
        x, D = _do_strip(x, D, Np, axis=axis)
    return x, D
Example #40
def bandpass(image, lshort, llong, threshold=None):
    """Convolve with a Gaussian to remove short-wavelength noise,
    and subtract out long-wavelength variations,
    retaining features of intermediate scale.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
    llong : large-scale cutoff
    for both lshort and llong:
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
        when 2*lshort >= llong, no noise filtering is applied

    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    ndarray, the bandpassed image
    """
    lshort = validate_tuple(lshort, image.ndim)      
    llong = validate_tuple(llong, image.ndim)
    if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    settings = dict(mode='nearest', cval=0)
    axes = range(image.ndim)
    sizes = [x*2+1 for x in llong]
    boxcar = np.asarray(image)
    for (axis, size) in zip(axes, sizes):
        boxcar = uniform_filter1d(boxcar, size, axis, **settings)
    gaussian = ifftn(fourier_gaussian(fftn(image), lshort)).real
    result = gaussian - boxcar
    return np.where(result > threshold, result, 0)
Example #41
def threshold_minimum(image, nbins=256, max_iter=10000):
    """Return threshold value based on minimum method.

    The histogram of the input `image` is computed and smoothed until there are
    only two maxima. Then the minimum in between is the threshold value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    max_iter : int, optional
        Maximum number of iterations to smooth the histogram.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Raises
    ------
    RuntimeError
        If unable to find two local maxima in the histogram or if the
        smoothing takes more than 1e4 iterations.

    References
    ----------
    .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding
           algorithms," CVGIP: Graphical Models and Image Processing,
           vol. 55, pp. 532-537, 1993.
    .. [2] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell
           images", Annals of the New York Academy of Sciences 128: 1035-1053
           DOI:10.1111/j.1749-6632.1965.tb11715.x

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_minimum(image)
    >>> binary = image > thresh
    """

    def find_local_maxima_idx(hist):
        # We can't use scipy.signal.argrelmax
        # as it fails on plateaus
        maximum_idxs = list()
        direction = 1

        for i in range(hist.shape[0] - 1):
            if direction > 0:
                if hist[i + 1] < hist[i]:
                    direction = -1
                    maximum_idxs.append(i)
            else:
                if hist[i + 1] > hist[i]:
                    direction = 1

        return maximum_idxs

    hist, bin_centers = histogram(image.ravel(), nbins)

    smooth_hist = np.copy(hist).astype(np.float64)

    for counter in range(max_iter):
        smooth_hist = ndif.uniform_filter1d(smooth_hist, 3)
        maximum_idxs = find_local_maxima_idx(smooth_hist)
        if len(maximum_idxs) < 3:
            break

    if len(maximum_idxs) != 2:
        raise RuntimeError('Unable to find two maxima in histogram')
    elif counter == max_iter - 1:
        raise RuntimeError('Maximum iteration reached for histogram '
                           'smoothing')

    # Find lowest point between the maxima
    threshold_idx = np.argmin(smooth_hist[maximum_idxs[0]:maximum_idxs[1] + 1])

    return bin_centers[maximum_idxs[0] + threshold_idx]
Example #42
def _n_profiles_H_V(arrayH, arrayV, virtual_pixelsize,
                    zlabel=r'z',
                    titleH='Horiz', titleV='Vert',
                    nprofiles=5, filter_width=0,
                    remove1stOrderDPC=False,
                    saveFileSuf='',
                    saveFigFlag=True):

    xxGrid, yyGrid = wpu.grid_coord(arrayH, virtual_pixelsize)

    fit_coefs = [[], []]
    data2saveH = None
    data2saveV = None
    labels_H = None
    labels_V = None

    plt.rcParams['lines.markersize'] = 4
    plt.rcParams['lines.linewidth'] = 2

    # Horizontal
    if np.all(np.isfinite(arrayH)):

        plt.figure(figsize=(12, 12*9/16))

        xvec = xxGrid[0, :]
        data2saveH = np.c_[xvec]
        header = ['x [m]']

        if filter_width != 0:
            arrayH_filtered = uniform_filter1d(arrayH, filter_width, 0)
        else:
            arrayH_filtered = arrayH

        ls_cycle, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],
                                                ncurves=nprofiles,
                                                cmap_str='gist_rainbow_r')

        lc = []
        labels_H = []
        for i, row in enumerate(np.linspace(filter_width//2,
                                            np.shape(arrayV)[0]-filter_width//2-1,
                                            nprofiles + 2, dtype=int)):

            if i == 0 or i == nprofiles + 1:
                continue

            yvec = arrayH_filtered[row, :]

            lc.append(next(lc_jet))
            p01 = np.polyfit(xvec, yvec, 1)
            fit_coefs[0].append(p01)

            if remove1stOrderDPC:
                yvec -= p01[0]*xvec + p01[1]

            plt.plot(xvec*1e6, yvec, next(ls_cycle), color=lc[i-1],
                     label=str(row))

            if not remove1stOrderDPC:
                plt.plot(xvec*1e6, p01[0]*xvec + p01[1], '--',
                         color=lc[i-1], lw=3)

            data2saveH = np.c_[data2saveH, yvec]
            header.append(str(row))
            labels_H.append(str(row))

        if remove1stOrderDPC:
            titleH = titleH + ', 2nd order removed'
        plt.legend(title='Pixel Y', loc=0, fontsize=12)

        plt.xlabel(r'x [$\mu m$]', fontsize=18)
        plt.ylabel(zlabel, fontsize=18)
        plt.title(titleH + ', Filter Width = {:d} pixels'.format(filter_width),
                  fontsize=20)

        if saveFigFlag:
            wpu.save_figs_with_idx(saveFileSuf + '_H')

        plt.show(block=False)

        header.append(zlabel + ', Filter Width = {:d} pixels'.format(filter_width))

        wpu.save_csv_file(data2saveH,
                          wpu.get_unique_filename(saveFileSuf +
                                                  '_WF_profiles_H', 'csv'),
                          headerList=header)

        plt.figure(figsize=(12, 12*9/16))
        plt.imshow(arrayH, cmap='RdGy',
                   vmin=wpu.mean_plus_n_sigma(arrayH, -3),
                   vmax=wpu.mean_plus_n_sigma(arrayH, 3))
        plt.xlabel('Pixel')
        plt.ylabel('Pixel')
        plt.title(titleH + ', Profiles Position')

        currentAxis = plt.gca()

        _, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],
                                         ncurves=nprofiles,
                                         cmap_str='gist_rainbow_r')

        for i, row in enumerate(np.linspace(filter_width//2,
                                            np.shape(arrayV)[0]-filter_width//2-1,
                                            nprofiles + 2, dtype=int)):

            if i == 0 or i == nprofiles + 1:
                continue

            currentAxis.add_patch(Rectangle((-.5, row - filter_width//2 - .5),
                                            np.shape(arrayH)[1], filter_width,
                                            facecolor=lc[i-1], alpha=.5))
            plt.axhline(row, color=lc[i-1])

        if saveFigFlag:
            wpu.save_figs_with_idx(saveFileSuf + '_H')

        plt.show(block=True)

    # Vertical
    if np.all(np.isfinite(arrayV)):

        plt.figure(figsize=(12, 12*9/16))

        xvec = yyGrid[:, 0]
        data2saveV = np.c_[xvec]
        header = ['y [m]']

        if filter_width != 0:
            arrayV_filtered = uniform_filter1d(arrayV, filter_width, 1)
        else:
            arrayV_filtered = arrayV

        ls_cycle, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],
                                                ncurves=nprofiles,
                                                cmap_str='gist_rainbow_r')

        lc = []
        labels_V = []
        for i, col in enumerate(np.linspace(filter_width//2,
                                            np.shape(arrayH)[1]-filter_width//2-1,
                                            nprofiles + 2, dtype=int)):

            if i == 0 or i == nprofiles + 1:
                continue

            yvec = arrayV_filtered[:, col]

            lc.append(next(lc_jet))
            p10 = np.polyfit(xvec, yvec, 1)
            fit_coefs[1].append(p10)

            if remove1stOrderDPC:
                yvec -= p10[0]*xvec + p10[1]

            plt.plot(xvec*1e6, yvec, next(ls_cycle), color=lc[i-1],
                     label=str(col))

            if not remove1stOrderDPC:
                plt.plot(xvec*1e6, p10[0]*xvec + p10[1], '--',
                         color=lc[i-1], lw=3)

            data2saveV = np.c_[data2saveV, yvec]
            header.append(str(col))
            labels_V.append(str(col))

        if remove1stOrderDPC:
            titleV = titleV + ', 2nd order removed'

        plt.legend(title='Pixel X', loc=0, fontsize=12)

        plt.xlabel(r'y [$\mu m$]', fontsize=18)
        plt.ylabel(zlabel, fontsize=18)

        plt.title(titleV + ', Filter Width = {:d} pixels'.format(filter_width),
                  fontsize=20)
        if saveFigFlag:
            wpu.save_figs_with_idx(saveFileSuf + '_V')
        plt.show(block=False)

        header.append(zlabel + ', Filter Width = {:d} pixels'.format(filter_width))

        wpu.save_csv_file(data2saveV,
                          wpu.get_unique_filename(saveFileSuf +
                                                  '_WF_profiles_V', 'csv'),
                          headerList=header)

        plt.figure(figsize=(12, 12*9/16))
        plt.imshow(arrayV, cmap='RdGy',
                   vmin=wpu.mean_plus_n_sigma(arrayV, -3),
                   vmax=wpu.mean_plus_n_sigma(arrayV, 3))
        plt.xlabel('Pixel')
        plt.ylabel('Pixel')
        plt.title(titleV + ', Profiles Position')

        currentAxis = plt.gca()

        for i, col in enumerate(np.linspace(filter_width//2,
                                            np.shape(arrayH)[1]-filter_width//2-1,
                                            nprofiles + 2, dtype=int)):

            if i == 0 or i == nprofiles + 1:
                continue

            currentAxis.add_patch(Rectangle((col - filter_width//2 - .5, -.5),
                                            filter_width, np.shape(arrayV)[0],
                                            facecolor=lc[i-1], alpha=.5))
            plt.axvline(col, color=lc[i-1])

        if saveFigFlag:
            wpu.save_figs_with_idx(saveFileSuf + '_V')

        plt.show(block=True)

    return data2saveH, data2saveV, labels_H, labels_V, fit_coefs
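
A minimal sketch of the filtering idea used throughout the function above, under the assumption that the inputs are plain 2D NumPy maps: `uniform_filter1d` averages the map over a band of `filter_width` pixels along one axis, so each extracted line profile represents that whole band rather than a single noisy row or column. All names and values below are illustrative.

import numpy as np
from scipy.ndimage import uniform_filter1d

rng = np.random.default_rng(0)
array2d = np.outer(np.ones(200), np.linspace(0, 1, 300))  # smooth ramp
array2d += 0.05 * rng.standard_normal(array2d.shape)      # plus noise

filter_width = 10   # pixels averaged per profile (illustrative)
nprofiles = 3

# Average along axis 1, mirroring uniform_filter1d(arrayV, filter_width, 1):
# each column becomes the mean of a filter_width-wide band of columns.
filtered = uniform_filter1d(array2d, filter_width, axis=1)

# Evenly spaced columns, skipping the half-window margins at the edges,
# exactly as the np.linspace(...) loops above do by dropping i == 0 and
# i == nprofiles + 1.
cols = np.linspace(filter_width // 2,
                   array2d.shape[1] - filter_width // 2 - 1,
                   nprofiles + 2, dtype=int)[1:-1]
profiles = [filtered[:, c] for c in cols]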
Example No. 43
import re
import sys

from matplotlib.pyplot import legend, plot, show
from scipy.ndimage import uniform_filter1d

if len(sys.argv) < 2:
    print('Usage: %s logfile' % sys.argv[0])
    sys.exit(1)

loss = []
iters = []
additional_loss = {}
with open(sys.argv[1], 'r') as f:
    lines = f.read()

# Captures "<name> = ... = <weighted value> loss)" pairs from Caffe
# "Train net output" lines.
r = re.compile(r'Train net output .* (.*) = .* = (.*) loss\)')
for l in lines.split('\n'):
    s = l.split()
    losses = r.findall(l)
    if losses:
        if losses[0][0] not in additional_loss:
            additional_loss[losses[0][0]] = []
        additional_loss[losses[0][0]].append(float(losses[0][1]))

    # Overall loss lines of the form "Iteration <N>, loss = <value>"
    if len(s) > 4 and s[-2] == '=' and s[-3] == 'loss':
        loss.append(float(s[-1]))
        iters.append(float(s[-4][:-1]))

plot(iters, loss, label='loss')
# Running mean over 100 iterations makes the trend readable.
plot(iters, uniform_filter1d(loss, 100), label='smooth loss')
for name in additional_loss:
    plot(iters[:len(additional_loss[name])], additional_loss[name], label=name)
legend()
show()
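
For reference, a quick demonstration of what the regular expression above extracts; the sample line mimics the Caffe training-log format but is made up:

import re

r = re.compile(r'Train net output .* (.*) = .* = (.*) loss\)')
# Illustrative Caffe-style log line (not from a real run):
line = ('I0101 00:00:00.000 1234 solver.cpp:245] '
        'Train net output #0: loss_cls = 0.42 (* 1 = 0.42 loss)')
print(r.findall(line))  # [('loss_cls', '0.42')] -> (output name, weighted loss)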
Example No. 44
            results.append(onevsallresult)

    if test is not None:
        score = []
        results = [list(res) for res in zip(*results)]
        for ind, result in enumerate(results):
            if result.count(1) == 1:
                if target[ind] == labels[result.index(1)]:
                    score.append(1)
                else:
                    score.append(0)
            else:
                score.append(0)
        print "score : ", float(sum(score))/len(score)
        
    # print "correct : ", float(len(target) - np.sum(np.abs(np.subtract(onevsall, onevsallresult))))/len(target)
    # print "correct : ", len(target) - np.sum(np.abs(np.subtract(onevsall, onevsallresult))), "/", len(target)


onevsall(train, target, test)
# Why doesn't it work?
# Results not linearly separable? Network not 'deep' enough? Training set too small?



train = [uniform_filter1d(np.absolute(np.fft.fft(t)), 7) for t in train]
test = [uniform_filter1d(np.absolute(np.fft.fft(t)), 7) for t in test]


onevsall(train, target, test)
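
The last two lines above swap each raw time series for a smoothed magnitude spectrum before retraining. A self-contained sketch of that preprocessing step on a toy signal (names and sizes are illustrative):

import numpy as np
from scipy.ndimage import uniform_filter1d

rng = np.random.default_rng(0)
t = np.sin(2 * np.pi * 5 * np.arange(256) / 256)  # toy 5-cycle sine
t += 0.3 * rng.standard_normal(t.shape)           # with noise

spectrum = np.absolute(np.fft.fft(t))     # magnitude spectrum
features = uniform_filter1d(spectrum, 7)  # average over 7 neighbouring bins

# Smoothing spreads each spectral peak over neighbouring bins, making the
# features less sensitive to the exact frequency bin a peak lands in.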
Example No. 45
    def detect(self, threshold, combine=0.03, pre_avg=0.15, pre_max=0.01,
               post_avg=0, post_max=0.05, delay=0):
        """
        Detects the onsets.

        :param threshold: threshold for peak-picking
        :param combine:   only report 1 onset for N seconds
        :param pre_avg:   use N seconds past information for moving average
        :param pre_max:   use N seconds past information for moving maximum
        :param post_avg:  use N seconds future information for moving average
        :param post_max:  use N seconds future information for moving maximum
        :param delay:     report the onset N seconds delayed

        In online mode, post_avg and post_max are set to 0.

        Implements the peak-picking method described in:

        "Evaluating the Online Capabilities of Onset Detection Methods"
        Sebastian Böck, Florian Krebs and Markus Schedl
        Proceedings of the 13th International Society for Music Information
        Retrieval Conference (ISMIR), 2012

        """
        # online mode?
        if self.online:
            post_max = 0
            post_avg = 0
        # convert timing information to frames
        pre_avg = int(round(self.fps * pre_avg))
        pre_max = int(round(self.fps * pre_max))
        post_max = int(round(self.fps * post_max))
        post_avg = int(round(self.fps * post_avg))
        # `combine` and `delay` are already given in seconds (see docstring),
        # so no millisecond-to-second conversion is applied here
        # init detections
        self.detections = []
        # moving maximum
        max_length = pre_max + post_max + 1
        max_origin = int(np.floor((pre_max - post_max) / 2))
        mov_max = maximum_filter1d(self.activations, max_length,
                                   mode='constant', origin=max_origin)
        # moving average
        avg_length = pre_avg + post_avg + 1
        avg_origin = int(np.floor((pre_avg - post_avg) / 2))
        mov_avg = uniform_filter1d(self.activations, avg_length,
                                   mode='constant', origin=avg_origin)
        # detections are activations equal to the moving maximum
        detections = self.activations * (self.activations == mov_max)
        # detections must be greater than or equal to the moving average + threshold
        detections *= (detections >= mov_avg + threshold)
        # convert detected onsets to a list of timestamps
        detections = np.nonzero(detections)[0].astype(float) / self.fps
        # shift if necessary
        if delay != 0:
            detections += delay
        # always use the first detection and all others if none was reported
        # within the last `combine` seconds
        if detections.size > 1:
            # filter all detections which occur within `combine` seconds
            combined_detections = detections[1:][np.diff(detections) > combine]
            # add them after the first detection
            self.detections = np.append(detections[0], combined_detections)
        else:
            self.detections = detections
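
A condensed NumPy sketch of the peak-picking rule the method implements: a frame is reported when its activation equals the moving maximum and exceeds the moving average by at least `threshold`. The activation vector and window size here are synthetic:

import numpy as np
from scipy.ndimage import maximum_filter1d, uniform_filter1d

activations = np.array([0., .1, .9, .2, .1, .1, .8, .1, 0., 0.])
threshold = 0.3

mov_max = maximum_filter1d(activations, 3, mode='constant')  # local maximum
mov_avg = uniform_filter1d(activations, 3, mode='constant')  # local mean

onsets = np.nonzero((activations == mov_max) &
                    (activations >= mov_avg + threshold))[0]
print(onsets)  # [2 6]: the two clear peaks survive both tests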
Example No. 46
import numpy as np
from scipy import ndimage as ndif  # assumed import matching `ndif.` below
from skimage.exposure import histogram  # returns (hist, bin_centers)


def threshold_minimum(image, nbins=256, bias='min', max_iter=10000):
    """Return threshold value based on minimum method.

    The histogram of the input `image` is computed and smoothed until there are
    only two maxima. Then the minimum in between is the threshold value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    bias : {'min', 'mid', 'max'}, optional
        'min', 'mid', 'max' return lowest, middle, or highest pixel value
        with minimum histogram value.
    max_iter : int, optional
        Maximum number of iterations to smooth the histogram.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Raises
    ------
    RuntimeError
        If unable to find two local maxima in the histogram or if the
        smoothing takes more than `max_iter` iterations.

    References
    ----------
    .. [1] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell images",
           Annals of the New York Academy of Sciences 128: 1035-1053

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_minimum(image)
    >>> binary = image > thresh
    """

    def find_local_maxima(hist):
        # We can't use scipy.signal.argrelmax
        # as it fails on plateaus
        maximums = list()
        direction = 1
        for i in range(hist.shape[0] - 1):
            if direction > 0:
                if hist[i + 1] < hist[i]:
                    direction = -1
                    maximums.append(i)
            else:
                if hist[i + 1] > hist[i]:
                    direction = 1
        return maximums

    if bias not in ('min', 'mid', 'max'):
        raise ValueError("Unknown bias: {0}".format(bias))

    hist, bin_centers = histogram(image.ravel(), nbins)

    smooth_hist = np.copy(hist)
    for counter in range(max_iter):
        smooth_hist = ndif.uniform_filter1d(smooth_hist, 3)
        maximums = find_local_maxima(smooth_hist)
        if len(maximums) < 3:
            break
    if len(maximums) != 2:
        if counter == max_iter - 1:
            raise RuntimeError('Maximum iteration reached for histogram '
                               'smoothing')
        raise RuntimeError('Unable to find two maxima in histogram')

    # Find lowest point between the maxima, biased to the low end (min)
    minimum = smooth_hist[maximums[0]]
    threshold = maximums[0]
    for i in range(maximums[0], maximums[1]+1):
        if smooth_hist[i] < minimum:
            minimum = smooth_hist[i]
            threshold = i

    if bias == 'min':
        return bin_centers[threshold]
    else:
        upper_bound = threshold
        while smooth_hist[upper_bound] == smooth_hist[threshold]:
            upper_bound += 1
        upper_bound -= 1
        if bias == 'max':
            return bin_centers[upper_bound]
        elif bias == 'mid':
            return bin_centers[(threshold + upper_bound) // 2]
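
To close, a small usage sketch of the minimum method on a synthetic bimodal image. It calls the public `skimage.filters.threshold_minimum`, which this function corresponds to; note that the `bias` keyword shown above is not part of every scikit-image release, so only the default behaviour is exercised:

import numpy as np
from skimage.filters import threshold_minimum

# Synthetic bimodal image: dark background plus a brighter square.
rng = np.random.default_rng(0)
image = rng.normal(50, 5, (128, 128))
image[32:96, 32:96] += 100   # foreground mode near 150

thresh = threshold_minimum(image)
binary = image > thresh      # True where pixels sit above the valley
print(thresh)                # a value between the two histogram modes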