Example #1
import numpy as np
from scipy.ndimage import uniform_filter1d

def calculate_length(points, times=1, w=5):
    """
    Calculates the length of a path.
    
    Paths sampled from pixel grids may contain notable measurement error if Euclidean distances are
    calculated naively. This method uses an approach adapted from [Cornelisse1984]_: it repeatedly smooths
    the coordinates with a moving average filter before calculating the Euclidean distance.
    
    .. [Cornelisse1984] Cornelisse and van den Berg (1984) Journal of Microscopy
       `10.1111/j.1365-2818.1984.tb00544.x <https://dx.doi.org/10.1111/j.1365-2818.1984.tb00544.x>`_
    
    :param points: Input points, a numpy array of shape (N, 2)
    :param times: Number of times smoothing should be applied
    :param w: Window width of the moving average filter
    :return: Length of the input path
    
    >>> calculate_length(np.array([[1.0, 1.0],
    ...                            [5.0, 5.0]]))
    5.656854249492381
    """
    # adapted method from Cornelisse and van den Berg
    # note: smoothing modifies `points` in place; pass a copy to keep the input intact
    if (len(points) - 2) > w:
        for _ in range(times):
            points[:, 0] = uniform_filter1d(points[:, 0], w, mode='nearest')
            points[:, 1] = uniform_filter1d(points[:, 1], w, mode='nearest')

    result = np.sqrt((np.diff(points, axis=0)**2.0).sum(axis=1)).sum()

    return result
Example #2
import numpy as np
import scipy.ndimage as sndi
from numpy.testing import assert_equal

def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
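
The list form of mode pairs one boundary mode with each axis, which a single-mode call cannot reproduce. A minimal sketch of why this matters, using the imports above:

arr = np.array([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 0.]])
# per-axis modes: 'reflect' on axis 0, 'wrap' on axis 1
mixed = sndi.uniform_filter(arr, 5, mode=['reflect', 'wrap'])
# a single mode applies the same boundary handling on every axis
single = sndi.uniform_filter(arr, 5, mode='reflect')
print(np.allclose(mixed, single))  # False: boundary handling differs on axis 1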
Example #4
    def _orography_gradients(self) -> Tuple[Cube, Cube]:
        """
        Calculates the dimensionless gradient of self.topography along both
        spatial axes, smoothed along the perpendicular axis.  If spatial
        coordinates are not in the same units as topography height (m),
        converts coordinate units in place.

        Returns:
            - 2D cube of dimensionless topography gradients in the
              positive x direction
            - 2D cube of dimensionless topography gradients in the
              positive y direction
        """
        self.topography.coord(axis="x").convert_units(self.topography.units)
        xdim = self.topography.coord_dims(self.topography.coord(axis="x"))[0]
        self.topography.coord(axis="y").convert_units(self.topography.units)
        ydim = self.topography.coord_dims(self.topography.coord(axis="y"))[0]

        # smooth topography by +/- one grid cell along the perpendicular axis
        # before calculating each gradient (as done in STEPS)
        topo_smx = uniform_filter1d(self.topography.data, 3, axis=ydim)
        topo_smx_cube = self.topography.copy(data=topo_smx)
        gradx, _ = GradientBetweenAdjacentGridSquares(
            regrid=True)(topo_smx_cube)
        gradx.units = "1"

        topo_smy = uniform_filter1d(self.topography.data, 3, axis=xdim)
        topo_smy_cube = self.topography.copy(data=topo_smy)
        _, grady = GradientBetweenAdjacentGridSquares(
            regrid=True)(topo_smy_cube)
        grady.units = "1"

        return gradx, grady
Example #5
    def analyze(self, sampling_freq, samples):
        (left, right) = samples.transpose()
        left_fft = np.array_split(np.abs(np.fft.fft(left, norm='ortho')), 2)[0]
        right_fft = np.array_split(np.abs(np.fft.fft(right, norm='ortho')), 2)[0]

        freqs = np.split(np.fft.fftfreq(int(samples.size / 2)) * sampling_freq, 2)[0]

        # note: the left channel is smoothed with a 2-sample window, the right with 10
        return freqs, uniform_filter1d(left_fft, size=2), uniform_filter1d(right_fft, size=10)
Example #6
import numpy as np
from scipy.ndimage import uniform_filter1d

def clean_spectrum(spectrum, size=10):
    """Replaces NaNs with the average of surrounding points"""
    mask = ~np.isfinite(spectrum)
    while np.sum(mask) > 0:
        # zero out the bad points, then rescale the windowed mean by the fraction of
        # good points per window, so only finite neighbours contribute to the average
        masked = np.where(mask, 0, spectrum)
        weights = 1. / (1. - uniform_filter1d(mask.astype(float), size))
        filtered = weights * uniform_filter1d(masked, size)
        spectrum[mask] = filtered[mask]
        mask = ~np.isfinite(spectrum)
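
A quick sketch of the in-place repair (note the helper mutates its argument):

spec = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
clean_spectrum(spec, size=3)
print(spec)  # the NaN becomes 3.0, the average of its finite neighbours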
Example #7
    def analyse_image(self, image: Image):
        """Analyse given image.

        Args:
            image: Image to analyse
        """

        # clean data
        data = self._clean(image.data)

        # get projections
        xproj = np.mean(data, axis=0)
        yproj = np.mean(data, axis=1)
        nx = len(xproj)
        ny = len(yproj)

        # remove background gradient
        xclean = xproj - ndimage.uniform_filter1d(xproj, nx // 10)
        yclean = yproj - ndimage.uniform_filter1d(yproj, ny // 10)

        # get window functions
        xwind = self._window_function(xclean, border=3)
        ywind = self._window_function(yclean, border=3)

        # calculate correlation functions
        xavg = np.average(xclean)
        yavg = np.average(yclean)
        x = xwind * (xclean - xavg) / xavg
        y = ywind * (yclean - yavg) / yavg
        xcorr = np.correlate(x, x, mode='same')
        ycorr = np.correlate(y, y, mode='same')

        # filter out the peak (e.g. cosmics, ...)
        # imx = np.argmax(xcorr)
        # xcorr[imx] = 0.5 * (xcorr[imx - 1] + xcorr[imx + 1])
        # imx = np.argmax(ycorr)
        # ycorr[imx] = 0.5 * (ycorr[imx - 1] + ycorr[imx + 1])

        # fit cc functions to get fwhm
        xfit = self._fit_correlation(xcorr)
        yfit = self._fit_correlation(ycorr)

        # log it
        log.info('Found x=%.1f+-%.1f and y=%.1f+-%.1f.',
                 xfit.params['fwhm'].value, xfit.params['fwhm'].stderr,
                 yfit.params['fwhm'].value, yfit.params['fwhm'].stderr)

        # add to list
        self._data.append({
            'focus': float(image.header['TEL-FOCU']),
            'x': float(xfit.params['fwhm'].value),
            'xerr': float(xfit.params['fwhm'].stderr),
            'y': float(yfit.params['fwhm'].value),
            'yerr': float(yfit.params['fwhm'].stderr)
        })
Example #8
    def _analyse_image(self, focus, data, backsub=True, xbad=None, ybad=None):
        # clean data
        data = self._clean(data, backsub=backsub, xbad=xbad, ybad=ybad)

        # get projections
        xproj = np.mean(data, axis=0)  # PROJECTIONS
        yproj = np.mean(data, axis=1)
        nx = len(xproj)
        ny = len(yproj)

        # remove background gradient
        xclean = xproj - ndimage.uniform_filter1d(xproj, nx // 10)
        yclean = yproj - ndimage.uniform_filter1d(yproj, ny // 10)

        # get window functions
        xwind = self._window_function(xclean, border=3)
        ywind = self._window_function(yclean, border=3)

        # calculate correlation functions
        xavg = np.average(xclean)
        yavg = np.average(yclean)
        x = xwind * (xclean - xavg) / xavg
        y = ywind * (yclean - yavg) / yavg
        xcorr = np.correlate(x, x, mode='same')
        ycorr = np.correlate(y, y, mode='same')

        # filter out the peak (e.g. cosmics, ...)
        # imx = np.argmax(xcorr)
        # xcorr[imx] = 0.5 * (xcorr[imx - 1] + xcorr[imx + 1])
        # imx = np.argmax(ycorr)
        # ycorr[imx] = 0.5 * (ycorr[imx - 1] + ycorr[imx + 1])

        # fit cc functions to get fwhm
        xfit = self._fit_correlation(xcorr)
        yfit = self._fit_correlation(ycorr)

        # log it
        log.info('Found x=%.1f+-%.1f and y=%.1f+-%.1f.',
                 xfit.params['fwhm'].value, xfit.params['fwhm'].stderr,
                 yfit.params['fwhm'].value, yfit.params['fwhm'].stderr)

        # add to list
        with self._data_lock:
            self._data.append({
                'focus': float(focus),
                'x': float(xfit.params['fwhm'].value),
                'xerr': float(xfit.params['fwhm'].stderr),
                'y': float(yfit.params['fwhm'].value),
                'yerr': float(yfit.params['fwhm'].stderr)
            })
Example #9
    def run(self, spec_data: SpectraData) -> SpectraData:
        """
        Smooth spectra data with a uniform smoother

        Parameters
        ----------
        spec_data : SpectraData
            The input spectra data

        Returns
        -------
        SpectraData
            The output spectra data
        """
        import scipy.ndimage as ndimage

        data = {}
        logger.info("Smoothing frequencies with uniform filter")
        messages = ["Smoothing frequencies with uniform filter"]
        for ilevel in range(spec_data.metadata.n_levels):
            n_freqs = spec_data.metadata.levels_metadata[ilevel].n_freqs
            smooth_length = self._get_smooth_length(n_freqs)
            logger.debug(
                f"Smoothing level {ilevel} with num points {smooth_length}")
            data[ilevel] = ndimage.uniform_filter1d(
                spec_data.get_level(ilevel), smooth_length, axis=-1)
            messages.append(
                f"Smoothed level {ilevel} with num points {smooth_length}")
        metadata = SpectraMetadata(**spec_data.metadata.dict())
        metadata.history.add_record(self._get_record(messages))
        logger.info(
            "Spectra data smoothed with uniform filter")
        return SpectraData(metadata, data)
Example #10
import numpy as np
from numpy import genfromtxt
from scipy.ndimage import uniform_filter1d

def sampling_calc(freqfilename, sigma, out):
    freq_arr = genfromtxt(freqfilename, delimiter=",")
    # boxcar (uniform) smoothing of freq_arr with window size sigma
    F = uniform_filter1d(freq_arr, size=sigma, mode="wrap")
    # Prepare output format
    header = (("sigma=%d")%(sigma))
    np.savetxt(out, F, delimiter=",",header=header)
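
A hypothetical round trip (the file names here are made up for illustration):

np.savetxt("freqs.csv", np.random.rand(100), delimiter=",")
sampling_calc("freqs.csv", sigma=5, out="freqs_smoothed.csv")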
Example #11
def point_through_time(point):
    fig, axes = plt.subplots(2, sharex=True)

    for ax, med_folder_name in zip(axes, ['MED5 - 0716', 'MED11 - 0730']):

        st_diagram = get_st_diagram(med_folder_name)

        theta_point = point * 2*np.pi/ st_diagram[0].size

        data = st_diagram[:, point]
        data = uniform_filter1d(data, 10)
        time = np.arange(data.size)/250 # domain in s

        ax.plot(time, data, c=DARK_GRAY_NORD)

        ax.set_ylabel(r'$\Delta \phi$', fontsize=16)

        med = med_folder_name.split(' ')[0]
        tag_ax(ax, text=med)

        ax.grid()

    ax.set_xlabel('Tiempo (s)', fontsize=16)

    plt.tight_layout()
    plt.savefig('../figures/med5_med11_thetatt.png', transparent=True)

    plt.show()
Example #12
def boxcar_convolution_filter(da,
                              width_in_time,
                              filter_type='low',
                              mode='reflect',
                              plot=False):
    from scipy.ndimage import uniform_filter1d

    dt = _np.mean(da.t[1:].data - da.t[:-1].data)
    width = _np.round(width_in_time / dt).astype(int)
    da_lp = _xr.DataArray(uniform_filter1d(da.data, size=width, mode=mode),
                          dims=da.dims,
                          coords=da.coords)
    da_hp = da - da_lp.data

    if plot is True:
        fig, ax = _plt.subplots()
        da.plot(ax=ax, label='Original')
        da_lp.plot(ax=ax, label='Lowpass')
        da_hp.plot(ax=ax, label='Highpass')
        ax.legend()

    if filter_type == 'high':
        return da_hp
    else:
        return da_lp
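
A minimal usage sketch (the synthetic signal is invented; the function expects an xarray DataArray with a time coordinate named 't'):

import numpy as _np
import xarray as _xr

t = _np.linspace(0, 10, 1000)
noisy = _np.sin(2 * _np.pi * 0.2 * t) + 0.1 * _np.random.randn(t.size)
da = _xr.DataArray(noisy, dims=['t'], coords={'t': t})
lowpass = boxcar_convolution_filter(da, width_in_time=1.0)
highpass = boxcar_convolution_filter(da, width_in_time=1.0, filter_type='high')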
Example #13
def remove_10hz(baseline_subbed, zapped, tsamp):
    t = np.arange(baseline_subbed.shape[1] + 46)
    boxcar = square(t / (1. / 2. / np.pi / 10. / tsamp))
    av_pwr = baseline_subbed.mean(axis=0)
    A = np.fft.fft(boxcar[:-46])
    B = np.fft.fft(av_pwr)
    Br = B.conjugate()
    fftfreq = np.fft.fftfreq(B.shape[0], tsamp)
    tenHz_ind = np.where(np.abs(fftfreq - 10.) == np.abs(fftfreq -
                                                         10.).min())[0][0]
    if (B[tenHz_ind - 5:tenHz_ind + 5] *
            B[tenHz_ind - 5:tenHz_ind + 5].conjugate()).max() > 1e4:
        c = np.fft.ifft(A * Br)
        shift = np.argmax(c[:46]) + 1
        boxcar = boxcar[shift:shift + baseline_subbed.shape[1]]
        amp = np.mean(
            np.array([
                np.abs(np.median(av_pwr[np.where(boxcar > 0)])),
                np.abs(np.median(av_pwr[np.where(boxcar < 0)]))
            ]))
        boxcar = amp * boxcar
        boxcar_arr = np.ones(baseline_subbed.shape) * boxcar
        dont_subtract = np.where(zapped == 1)
        boxcar_arr[dont_subtract[0], dont_subtract[1]] = 0.
        no_boxcar = baseline_subbed - boxcar_arr
        # smooth_time is a constant defined outside this snippet
        baseline_no_boxcar = snd.uniform_filter1d(no_boxcar.mean(axis=0),
                                                  int(smooth_time / tsamp))
        data_out = no_boxcar - baseline_no_boxcar
    else:
        data_out = baseline_subbed
    return data_out
Example #14
def threshold_minimum(image, nbins=256, max_iter=10000):
    def find_local_maxima_idx(hist):
        maximum_idxs = list()
        direction = 1

        for i in range(hist.shape[0] - 1):
            if direction > 0:
                if hist[i + 1] < hist[i]:
                    direction = -1
                    maximum_idxs.append(i)
            else:
                if hist[i + 1] > hist[i]:
                    direction = 1

        return maximum_idxs

    hist, bin_centers = histogram(image.ravel(), nbins)

    smooth_hist = np.copy(hist).astype(np.float64)

    for counter in range(max_iter):
        smooth_hist = ndi.uniform_filter1d(smooth_hist, 3)
        maximum_idxs = find_local_maxima_idx(smooth_hist)
        if len(maximum_idxs) < 5:
            break

    # Find lowest point between the maxima
    threshold_idx = np.argmin(smooth_hist[maximum_idxs[0]:maximum_idxs[1] + 1])
    return bin_centers[maximum_idxs[0] + threshold_idx]
Example #15
    def calc_FoM(self, width, s_lambda=3, s_time=3, use_max=False):
        """Calculate the figure of merit (FoM) of a dataset (t, x, and lambda).

        In this case our figure of merit is calculated as the _maximum_ value
        along the spectral dimension of the spatially averaged data, relative
        to its mean and standard deviation.

        Parameters
        ----------
        data : ndarray (NxMxK)
            the array over which to calculate the SNR, assumes that it
            has dimensions (time, position, spectrum)
        width : int
            the width over which to calculate the average in the spatial
            dimension
        s_lambda : float (optional)
            the width of the gaussian kernel along the spectral dimension
        s_time : float (optional)
            the width of the gaussian kernel along the time dimension
        use_max : bool (optional)
            whether to use the max projection or not, will significantly speed
            up the calculation but will raise the noise floor in the process.

        Returns
        -------
        FoM : ndarray (NxK)
            The calculated figure of merit (FoM)
        """
        # before we make another copy we should trash the old one, if it exists
        # if we don't do this it can lead to a memory leak.
        try:
            del self.g_mean_data
        except AttributeError:
            pass

        # First calculate the moving average of the data along the spatial
        # dimension cast as float64 for better precision, this is necessary
        # for the later gaussian filters, but might as well do it now to avoid
        # making more copies of the data than necessary.

        if use_max:
            data = self.data.max(0, keepdims=True).astype(float)
        else:
            data = self.data.astype(float)

        mean_data = uniform_filter1d(data, width, axis=1)

        # calculate the gaussian blur along the spectral and time dimensions
        if s_time == 0 and s_lambda == 0:
            g_mean_data = mean_data
        else:
            g_mean_data = gaussian_filter(mean_data, (s_time, 0, s_lambda))

        g_mean_data_mean = g_mean_data.mean(axis=(0, 2))
        g_mean_data_std = g_mean_data.std(axis=(0, 2))
        g_mean_data_max = g_mean_data.max(axis=(0, 2))

        FoM = (g_mean_data_max - g_mean_data_mean) / g_mean_data_std

        self.FoM = FoM
        self.g_mean_data = g_mean_data
Example #16
import numpy as np
from scipy.ndimage import (gaussian_filter1d, median_filter, uniform_filter1d,
                           laplace, gaussian_laplace)

def preprocessing(features, method):
    """preprocess the sequence data

    Parameters:
        features (numpy array): original data sequence
        method (dictionary): preprocessing methods (filters) and sizes of filters

    Returns:
         features (numpy array): data sequence after preprocessing
    """
    for key, value in method.items():
        if key == 'gaussian':
            for i in range(6):
                features[:, i] = gaussian_filter1d(features[:, i],
                                                   sigma=value,
                                                   axis=0)
        elif key == 'median':
            for i in range(6):
                features[:, i] = median_filter(features[:, i], size=value)
        elif key == 'uniform':
            for i in range(6):
                features[:, i] = uniform_filter1d(features[:, i],
                                                  size=value,
                                                  axis=0)
        elif key == 'laplace':
            for i in range(6):
                features[:, i] = laplace(features[:, i])
        elif key == 'gaussian_laplace':
            for i in range(6):
                features[:, i] = gaussian_laplace(features[:, i], sigma=value)
    return features
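
A hedged sketch of calling this on a 6-channel sequence (shapes and filter sizes invented for illustration):

seq = np.random.randn(200, 6)
seq = preprocessing(seq, method={'median': 5, 'uniform': 9})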
Example #17
from typing import Union

import numpy as np
from scipy.ndimage import uniform_filter1d, gaussian_filter1d

def temporal_filter1d(video: np.ndarray,
                      size: Union[int, float],
                      filter_type: str = "uniform") -> np.ndarray:
    """apply simple averaging per pixel trace

    Parameters
    ----------
    video: np.ndarray
        the input video nframes x nrows x ncols
    size: int or float
        passed to 'uniform_filter1d' as 'size' (int) or to 'gaussian_filter1d'
        as 'sigma' (float)
    filter_type: str
        'uniform' or 'gaussian'

    Returns
    -------
    output: np.ndarray
        the filtered video, same size and shape as the input video

    """
    filters = ['uniform', 'gaussian']
    if filter_type not in filters:
        raise ValueError(f"'filter_type' must be one of {filters}, "
                         f"but {filter_type} was provided.")
    if filter_type == "uniform":
        size = int(size)
        output = uniform_filter1d(video, size, axis=0, mode="nearest")
    else:
        output = gaussian_filter1d(video, size, axis=0, mode="nearest")

    return output
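
A short usage sketch:

video = np.random.rand(100, 8, 8).astype(np.float32)
smoothed = temporal_filter1d(video, size=5)
assert smoothed.shape == video.shape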
Example #18
def trace(im, yestimate=None, yorder=2, sigorder=4, step=10):
    """ Trace the spectrum.  Spectral dimension is assumed to be on the horizontal axis."""
    ny, nx = im.shape
    if yestimate is None:
        ytot = np.sum(im, axis=1)
        yestimate = np.argmax(ytot)
    # Smooth in spectral dimension
    # a uniform (boxcar) filter with a width of 50
    smim = ndimage.uniform_filter1d(im, 50, 1)
    nstep = nx // step
    # Loop over the columns in steps and fit Gaussians
    tcat = np.zeros(nstep, dtype=np.dtype([('x', float), ('pars', float, 4)]))
    y = np.arange(ny)  # spatial coordinate; not defined in the original snippet
    for i in range(nstep):
        # fit the smoothed image (smim was computed above but left unused originally)
        pars, cov = dln.gaussfit(
            y[yestimate - 10:yestimate + 10],
            smim[yestimate - 10:yestimate + 10, step * i + step // 2])
        tcat['x'][i] = step * i + step // 2
        tcat['pars'][i] = pars
    # Fit polynomial to y vs. x and gaussian sigma vs. x
    ypars = np.polyfit(tcat['x'], tcat['pars'][:, 1], yorder)
    sigpars = np.polyfit(tcat['x'], tcat['pars'][:, 2], sigorder)
    # Model
    mcat = np.zeros(nx,
                    dtype=np.dtype([('x', float), ('y', float),
                                    ('sigma', float)]))
    xx = np.arange(nx)
    mcat['x'] = xx
    mcat['y'] = np.poly1d(ypars)(xx)
    mcat['sigma'] = np.poly1d(sigpars)(xx)
    return tcat, ypars, sigpars, mcat
Example #19
    def average_velocity(self,Navg=-300):
        """Calculates moving average using
        scipy.ndimage.uniform_filter1d

        Called by remove_shear()

        Parameters
        ----------
        Navg : integer, optional
            Number of snapshots over which to average. If Navg < 0,
            average from end of series only, otherwise a sliding average
            is used.

        Returns
        -------
        uavg : ndarray
            If Navg < 0, uavg.shape==(Nh,Nv); otherwise a moving average
            is returned, with uavg.shape==(Ntimes,Nh,Nv).
        """
        Navg = int(Navg)
        if Navg < 0:
            self.uavg = np.mean(self.u_tot[-Navg:,:,:], axis=0)  # shape=(Nh,Nv)
        elif Navg > 0:
            self.uavg = uniform_filter1d(self.u_tot, size=Navg, axis=0, mode='mirror')  # see http://stackoverflow.com/questions/22669252/how-exactly-does-the-reflect-mode-for-scipys-ndimage-filters-work
        else:
            # no averaging performed
            Navg = 1
            self.uavg = self.u_tot
        self.Navg = Navg
        return self.uavg
Example #20
    def uniform_filter1d_kernel(self, r_, c_):
        self.moving_avg[:, r_, c_] = uniform_filter1d(self.video[:, r_, c_],
                                                      size=self.batchSize,
                                                      mode='constant',
                                                      cval=0.0,
                                                      origin=-(self.batchSize // 2),
                                                      axis=0)
Example #21
from numpy import mean
from scipy.ndimage import uniform_filter1d

def pre_check_line(line):
    project = mean(1 - line, axis=0)
    # the filter size must be an integer, hence floor division
    project = uniform_filter1d(project, line.shape[0] // 3)
    m = mean(project)
    return bool((m > 0.13) & (1.0 * line.shape[1] / line.shape[0] > 1.7))
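
A tiny sketch on a synthetic binarised line image (values in [0, 1], background = 1):

import numpy as np

line = np.ones((10, 40))
line[4:7, 5:35] = 0.0  # dark stroke
print(pre_check_line(line))  # True: enough ink and a wide enough aspect ratio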
Example #22
def correct_bp_and_zap_channels(data_arr, bp_bins, i, clipthresh_spec,
                                bp_thresh, old_bp, norm, zapped):
    bp = (snd.uniform_filter1d(data_arr, bp_bins,
                               axis=1))[:, bp_bins // 2::bp_bins]
    for j in range(bp.shape[1] - 1):
        div_bp = np.copy(bp[:, j])
        #div_bp[np.where(bp[:,j]==0)[0]] = 1e-5
        norm_bp = ((
            (data_arr[:, j * bp_bins:(j + 1) * bp_bins]).T / div_bp).T) * 64.
        zapped_bp = zapped[:, j * bp_bins:(j + 1) * bp_bins]
        if i == 0 and j == 0:
            changed = np.where(
                np.abs(norm_bp.mean(axis=1) - 64.) > clipthresh_spec - 64.)[0]
            norm_bp[changed] = 64.
            zapped[changed, j * bp_bins:(j + 1) * bp_bins] = 1.
        elif i != 0 and j == 0:
            div_bp = np.copy(old_bp)
            changed_1 = np.where(
                np.abs(bp[:, j] - old_bp) / div_bp > bp_thresh)[0]
            changed_2 = np.where(
                np.abs(norm_bp.mean(axis=1) - 64.) > clipthresh_spec - 64.)[0]
            #div_bp[np.where(old_bp==0)[0]] = 1e-5
            norm_bp[changed_1] = 64.
            norm_bp[changed_2] = 64.
            zapped[changed_1, j * bp_bins:(j + 1) * bp_bins] = 1.
            zapped[changed_2, j * bp_bins:(j + 1) * bp_bins] = 1.
        else:
            div_bp = np.copy(bp[:, j - 1])
            #div_bp[np.where(bp[:,j-1]==0)[0]] = 1e-5
            changed_1 = np.where(
                np.abs(bp[:, j] - bp[:, j - 1]) / div_bp > bp_thresh)[0]
            changed_2 = np.where(
                np.abs(norm_bp.mean(axis=1) - 64.) > clipthresh_spec - 64.)[0]
            norm_bp[changed_1] = 64.
            norm_bp[changed_2] = 64.
            zapped[changed_1, j * bp_bins:(j + 1) * bp_bins] = 1.
            zapped[changed_2, j * bp_bins:(j + 1) * bp_bins] = 1.
        norm[:, j * bp_bins:(j + 1) * bp_bins] = norm_bp
    div_bp1 = np.copy(bp[:, j + 1])
    #div_bp1[np.where(bp[:,j+1]==0)[0]] = 1e-5
    div_bp0 = np.copy(bp[:, j])
    #div_bp0[np.where(bp[:,j]==0)[0]] = 1e-5
    norm_bp = (((data_arr[:, (j + 1) * bp_bins:]).T / div_bp1).T) * 64.
    changed_1 = np.where(
        np.abs(bp[:, j + 1] - bp[:, j]) / div_bp0 > bp_thresh)[0]
    changed_2 = np.where(
        np.abs(norm_bp.mean(axis=1) - 64.) > clipthresh_spec - 64.)[0]
    norm_bp[changed_1] = 64.
    norm_bp[changed_2] = 64.
    zapped[changed_1, (j + 1) * bp_bins:] = 1.
    zapped[changed_2, (j + 1) * bp_bins:] = 1.
    norm[:, (bp.shape[1] - 1) * bp_bins:] = norm_bp
    norm[zap] = 64.  # 'zap' is a mask defined outside this snippet
    zapped[zap] = 1.
    #norm[227] = 64.
    #norm[52]=64.
    old_bp = np.copy(bp[:, -1])
    return old_bp, zapped, norm
Example #23
def blaze_model(blaze, sdev=3):
    """Applies a running-average fit to the blaze order data.
        args:
            blaze: the average echelle blaze data
            sdev: standard deviation cutoff

        returns:
            a: the mean blaze model
    """
    def nan_helper(data):  #Function which allows interpolation though nans
        return lambda z: z.nonzero()[0]

    blaze = blaze.copy()
    data = blaze.copy()

    nan_mask = np.isnan(blaze)
    no_nan = nan_helper(blaze)

    #This will re-create the dataset but will interpolate though the nans giving it a place holder average value
    blaze[nan_mask] = np.interp(no_nan(nan_mask),
                                no_nan(~nan_mask),
                                blaze[~nan_mask],
                                period=1)

    a = uniform_filter1d(blaze, size=300,
                         mode="nearest")  #Applies moving averages on data

    #Takes the difference, then replaces data values falling more than sdev std below the mean trend with the mean
    diff = blaze - a
    cleaned_blaze_1 = blaze
    cleaned_blaze_1[diff < -sdev * np.std(diff)] = a[diff < -sdev *
                                                     np.std(diff)]

    #Repeats the process on the new data, replacing values more than sdev std away from the trend in either direction
    a = uniform_filter1d(cleaned_blaze_1, size=300, mode="nearest")
    diff = blaze - a
    cleaned_blaze_2 = cleaned_blaze_1
    cleaned_blaze_2[np.absolute(diff) > sdev *
                    np.std(diff)] = a[np.absolute(diff) > sdev * np.std(diff)]
    a = uniform_filter1d(cleaned_blaze_1, size=300, mode="nearest")

    #Replace all positions that contained nan values initially with nans again
    a[np.isnan(data)] = data[np.isnan(data)]

    return a
Example #24
def kdp_genesis(radar):
    #Inputs,
    #radar: Quality-controlled volume data
    print('KDP Section')
    #Using NWS method, creating ungridded, smoothed KDP field
    #Pulling in required radar fields; Differential phase to have a gradient applied, Correlation coefficient and reflectivity for smoothing
    phidp_ungridded = radar.fields['differential_phase']['data']
    cc_ungridded = radar.fields['cross_correlation_ratio']['data']
    ref_ungridded = radar.fields['reflectivity']['data']

    #Creating KDP as the range derivative/gradient of Phidp
    phidp_ungridded = np.asarray(np.gradient(phidp_ungridded)) / 0.50
    kdp_raw = phidp_ungridded[1, :, :]
    kdp_raw = ma.masked_where(kdp_raw > 40., kdp_raw)

    #Perform NWS outlined smoothing process
    kdp_9s = ndi.uniform_filter1d(kdp_raw, 8, 1)
    kdp_25s = ndi.uniform_filter1d(kdp_raw, 24, 1)
    kdp_9s = ma.masked_where(ref_ungridded < 20, kdp_9s)
    kdp_9s = ma.masked_where(cc_ungridded < 0.90, kdp_9s)
    kdp_25s = ma.masked_where(ref_ungridded < 20, kdp_25s)
    kdp_25s = ma.masked_where(cc_ungridded < 0.90, kdp_25s)
    kdp_9s[ref_ungridded < 40] = kdp_25s[ref_ungridded < 40]
    kdp_9s = ma.masked_where(ref_ungridded < 35, kdp_9s)
    kdp_9s = ma.masked_where(cc_ungridded < 0.90, kdp_9s)
    kdp_8 = np.copy(kdp_9s)
    kdp_8 = ma.masked_where(kdp_8 < 8, kdp_8)
    kdp_8 = ma.masked_where(ref_ungridded > 50, kdp_8)
    kdp_8 = ma.filled(kdp_8, fill_value=-2)
    kdp_9s = ma.masked_where(kdp_8 > 1, kdp_9s)
    kdp_9s = ma.masked_where(ref_ungridded < 20., kdp_9s)

    #Create dictionary
    kdp_nwsdict = {}
    kdp_nwsdict['units'] = 'degrees/km'
    kdp_nwsdict['standard_name'] = 'specific_differential_phase_hv'
    kdp_nwsdict['long_name'] = 'Specific Differential Phase (KDP)'
    kdp_nwsdict['coordinates'] = 'elevation azimuth range'
    kdp_nwsdict['data'] = kdp_9s
    kdp_nwsdict['valid_min'] = 0.0
    kdp_nwsdict['Clipf'] = 3906250000.0

    #Returning variables,
    #kdp_nwsdict: Fully smoothed KDP dictionary to be added to radar data object
    return kdp_nwsdict
Example #25
    def smooth(self, smoothPix):
        """Smooths SED.flux with a uniform (boxcar) filter of width smoothPix. Cannot be undone.

        @type smoothPix: int
        @param smoothPix: size of uniform filter applied to SED, in pixels

        """
        smoothed = ndimage.uniform_filter1d(self.flux, smoothPix)
        self.flux = smoothed
Example #26
def membrane_currents_dg(filename, t):
    
    phi_sn, phi_se, phi_sg, phi_dn, phi_de, phi_dg, phi_msn, phi_mdn, phi_msg, phi_mdg = membrane_potentials(filename)
    E_Na_sn, E_Na_sg, E_Na_dn, E_Na_dg, E_K_sn, E_K_sg, E_K_dn, E_K_dg, E_Cl_sn, E_Cl_sg, E_Cl_dn, E_Cl_dg, E_Ca_sn, E_Ca_dn = reversal_potentials(filename)
    cNa_sn, cNa_se, cNa_sg, cNa_dn, cNa_de, cNa_dg, cK_sn, cK_se, cK_sg, cK_dn, cK_de, cK_dg, cCl_sn, cCl_se, cCl_sg, cCl_dn, cCl_de, cCl_dg, cCa_sn, cCa_se, cCa_dn, cCa_de = ion_concentrations(filename)
    n, h, s, c, q, z = state_variables(filename)
    my_cell = dummy_cell()
    
    # capacitive current
    dt = np.diff(t)
    I_cap = my_cell.C_mdg*my_cell.A_m * np.diff(phi_mdg)/dt
    
    # membrane currents through ion channels
    I_leak = my_cell.A_m * (my_cell.g_Na_leak_g*(phi_mdg - E_Na_dg) + my_cell.g_Cl_leak_g*(phi_mdg - E_Cl_dg))
    I_pump = my_cell.A_m * my_cell.F *my_cell.j_pump_g(cNa_dg, cK_de)
    dphi = (phi_mdg - E_K_dg)*1000
    phi_m_mil = phi_mdg*1000
    bE_K_mil = my_cell.bE_K_dg*1000
    fact1 = (1 + np.exp(18.4/42.4))/(1 + np.exp((dphi + 18.5)/42.5))
    fact2 = (1 + np.exp(-(118.6+bE_K_mil)/44.1))/(1+np.exp(-(118.6+phi_m_mil)/44.1))
    f = np.sqrt(cK_de/my_cell.cbK_de) * fact1 * fact2 
    I_Kir = my_cell.A_m * my_cell.g_K_IR * f * (phi_mdg - E_K_dg)
    
    # interpolate
    f_I_cap = interp1d(t[:-1], I_cap, 'cubic')
    f_I_leak = interp1d(t, I_leak, 'cubic')
    f_I_pump = interp1d(t, I_pump, 'cubic')
    f_I_Kir = interp1d(t, I_Kir, 'cubic')

    tt = np.linspace(int(t[0]), int(t[-1]), len(t))
    I_cap = f_I_cap(tt[:-1])
    I_leak = f_I_leak(tt)
    I_pump = f_I_pump(tt)
    I_Kir = f_I_Kir(tt)
    
    # calculate moving averages
    dt = np.diff(tt)[0]
    size = int(10/dt)
    av_I_cap = uniform_filter1d(I_cap, size)
    av_I_leak = uniform_filter1d(I_leak, size)
    av_I_pump = uniform_filter1d(I_pump, size)
    av_I_Kir = uniform_filter1d(I_Kir, size)

    return tt, av_I_cap, av_I_leak, av_I_pump, av_I_Kir
Example #27
def spectral_peak_trigger(wav, range=(1000, 1e4), method='cwt', interval=1./44100, threshold_ratio=1.2, widths=None):

    f = abs(np.fft.fft(wav))
    freq = np.fft.fftfreq(f.size, interval)
    plim = f.size // 2
    f = f[:plim]
    freq = freq[:plim]
    f = spi.uniform_filter1d(f, 100)

    if method == 'cwt':
        if widths is None:
            widths = np.linspace(f.size / 50, f.size / 10, 10)
        peaks_inds = sps.find_peaks_cwt(f, widths)
    else:
        x_left, x_right = range
        mask = (abs(freq) >= range[0]) * (abs(freq) <= range[1])
        x_left_ind, x_right_ind = np.nonzero(mask)[0][0], np.nonzero(mask)[0][-1]
        f_left = np.mean(f[x_left_ind:x_left_ind + 5])
        f_right = np.mean(f[x_right_ind - 5:x_right_ind])
        peaks_inds = np.nonzero(freq >= int(np.mean(range)))[0][0]

    # Plotting spectrum
    plt.ion()
    plt.show()
    plt.xlabel('Frequency (Hz)')
    plt.clf()
    plt.plot(freq, f)
    plt.vlines(range[0], 0, np.max(f))
    plt.vlines(range[1], 0, np.max(f))
    plt.scatter(freq[peaks_inds], f[peaks_inds] + np.max(f) * 0.02)
    plt.xlim(0, None)
    plt.ylim(0, None)
    plt.draw()
    plt.pause(0.001)

    trigger = False
    if method == 'cwt':
        # compare peak frequencies (not raw indices) against the band limits
        peak_freqs = freq[peaks_inds]
        if np.count_nonzero((peak_freqs >= range[0]) * (peak_freqs <= range[1])) > 0:
            trigger = True
    else:
        base_int = (f_left + f_right) * (x_right_ind - x_left_ind + 1) / 2
        chunk_int = np.sum(f[mask])
        peak_ratio = chunk_int / base_int
        if peak_ratio >= threshold_ratio:
            trigger = True
        print('{}: Integration ratio = {}.'.format(datetime.datetime.now(), peak_ratio))

    if trigger and method != 'cwt':  # peak_ratio only exists on the integration branch
        os.makedirs('log', exist_ok=True)
        with open(os.path.join('log', str(datetime.date.today()) + '.txt'), 'a') as logfile:
            logfile.write('{}: Integration ratio = {}.\n'.format(datetime.datetime.now(), peak_ratio))
    return trigger
Example #28
def smooth_data(data, smooth=0, polyorder=0):
    """
    Smooth the input data with a kernel of width ``smooth``. If ``polyorder``
    is provided, will smooth with a Savitzky-Golay filter, while if
    ``polyorder=0``, the default, then only a top-hat kernel will be used. From
    experimentation, ``smooth=5`` with ``polyorder=3`` provides a good result
    for noisy, but spectrally resolved data.

    .. warning::
        When smoothing low resolution data, this can substantially alter the
        line profile, so measurements must be taken with caution.

    Args:
        data (array): Data to smooth.
        smooth (optional[int]): The width of the kernel for smoothing, in
            number of channels.
        polyorder (optional[int]): Polynomial order for the Savitzky-Golay
            filter. This must be smaller than ``smooth``. If not provided, the
            smoothing will only be a top-hat filter.

    Returns:
        smoothed_data (array): A smoothed copy of ``data``.
    """
    assert data.ndim == 3, "Data must have 3 dimensions to smooth."
    if smooth > 1:
        if polyorder > 0:
            from scipy.signal import savgol_filter
            smooth += 0 if smooth % 2 else 1
            smoothed_data = savgol_filter(data,
                                          smooth,
                                          polyorder=polyorder,
                                          mode='wrap',
                                          axis=0)
        else:
            from scipy.ndimage import uniform_filter1d
            a = uniform_filter1d(data, smooth, mode='wrap', axis=0)
            b = uniform_filter1d(data[::-1], smooth, mode='wrap', axis=0)[::-1]
            smoothed_data = np.mean([a, b], axis=0)
    else:
        smoothed_data = data.copy()
    return smoothed_data
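
A short sketch on a synthetic cube (shape chosen to satisfy the 3-D requirement):

import numpy as np

cube = np.random.randn(64, 16, 16)  # (velocity, y, x)
smoothed = smooth_data(cube, smooth=5, polyorder=3)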
Example #29
    def fix_cosmic_rays(self, width, z_score_cutoff=2.5):
        """Remove cosmic rays from good peaks.

        Assumes that cosmic rays only show up for one frame and are *bright*
        """
        # calculate the average around the peaks
        mean_data_sum = uniform_filter1d(self.data, width, axis=1).sum(2)
        z_score = (mean_data_sum.max(0) - mean_data_sum.mean(0)) / mean_data_sum.std(0)
        bad_peaks = np.arange(len(z_score))[z_score > z_score_cutoff]

        self.peaks = [p for p in self.peaks if p not in bad_peaks]
Example #30
    def smooth(self, smoothPix):
        """Smooths SED.flux with a uniform (boxcar) filter of width smoothPix. Cannot be undone.

        @type smoothPix: int
        @param smoothPix: size of uniform filter applied to SED, in pixels
        @return: new Spectrum
        """
        from scipy import ndimage
        smoothed = ndimage.uniform_filter1d(self._flux, smoothPix)
        sp = Spectrum(self.Name, self.Freq, flux=smoothed)
        # self._flux = smoothed
        return sp
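
Unlike the in-place variant in Example #25, this version returns a new Spectrum and leaves self._flux untouched.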
Example #31
    def fix_cosmic_rays(self, width, z_score_cutoff=2.5):
        '''
        Method to remove cosmic rays from good peaks.

        Assumes that cosmic rays only show up for one frame and are *bright*
        '''
        # calculate the average around the peaks
        mean_data_sum = uniform_filter1d(self.data, width, axis=1).sum(2)
        z_score = (mean_data_sum.max(0) -
                   mean_data_sum.mean(0)) / mean_data_sum.std(0)
        bad_peaks = np.arange(len(z_score))[z_score > z_score_cutoff]

        self.peaks = [p for p in self.peaks if p not in bad_peaks]
Example #32
def top_hat_smooth(model_spectra, width):

    # boss_diff_7000, alphas_boss_masked, and wav_masked are defined outside this snippet
    width_A = int(width / boss_diff_7000)
    print("width", width_A)
    boss_smoothed = ndimage.uniform_filter1d(alphas_boss_masked, width_A)
    boss_norm = alphas_boss_masked / boss_smoothed

    for i, model_spectrum in enumerate(model_spectra):
        model_smoothed = ndimage.uniform_filter1d(model_spectrum, width_A)
        model_norm = model_spectrum / model_smoothed
        # plt.plot(wav_masked, model_spectrum, label='bc03')
        # plt.plot(wav_masked, model_smoothed, label='bc03 smooth')
        plt.plot(wav_masked, model_norm, label='bc03 norm' + str(i))

    plt.plot(wav_masked, alphas_boss_masked, label='BOSS')
    plt.plot(wav_masked, boss_smoothed, label='BOSS smooth')
    plt.plot(wav_masked, boss_norm, label='BOSS norm')
    plt.ylim(0, 2)
    # plt.xlim(3850, 4100)  # TEST
    # plt.xlim(4945, 5065)  # TEST
    plt.legend()
    plt.show()
Example #33
def calculate_error(meas_data, sim_data, meas_res):

    filt_size = 50
    y_grid = meas_data.y_grid
    y_grid_filt = ndimage.uniform_filter1d(y_grid, filt_size)

    y_bounds = [1e-3, 14e-3]
    idy = [
        np.argmin(abs(y_grid - y_bounds[0])),
        np.argmin(abs(y_grid - y_bounds[1]))
    ]

    avg_vel = np.average(meas_data.profiles[-1])

    val_error = 0.0
    der_error = 0.0
    errors = []
    for i in range(len(meas_data.profiles)):
        filt_size_meas = \
            int(round(meas_res/abs(y_grid[-1]-y_grid[0]) * len(y_grid)))
        vel_sim_filt = \
            ndimage.uniform_filter1d(sim_data.profiles[i], filt_size_meas)
        vel_meas_filt = \
            ndimage.uniform_filter1d(meas_data.profiles[i], filt_size)
        vel_sim_double_filt = \
            ndimage.uniform_filter1d(sim_data.profiles[i], filt_size)

        der_meas = np.gradient(vel_meas_filt, y_grid_filt)
        der_sim = np.gradient(vel_sim_double_filt, y_grid_filt)
        der_error = np.nansum((der_meas - der_sim)**2)  # note: '=' (not '+=') keeps only the last profile's derivative error
        vel_meas = meas_data.profiles[i][idy[0]:idy[1]]
        #vel_sim = sim_data.profiles[i][idy[0]:idy[1]]
        vel_sim = vel_sim_filt[idy[0]:idy[1]]
        val_error += np.nansum(((vel_meas - vel_sim) / avg_vel)**2)
        errors.append(val_error + der_error)

    error = val_error + der_error
    return error, errors
Example #34
 def parse_mca(self, data, twotheta, energy=None, col=1, chmin=0, chmax=np.inf,
                     average=0, verbose=False):
     """
         Loads and processes .fio files produced by the ``online``
         software at DESY-FS.
         Here the data is supposed to be 1-dimensional, meaning 
         that only one COLUMN of the file will be processed.
         
         Inputs:
             data : str or file handle
                 Name or file handle of the .fio file to load
             
             twotheta : str or float
                 Either value or motor name containing the angle 
                 between incident and scattered beam.
             
             col : int
                 Number of column that shall be processed if data 
                 is 2-dimensional
             
             chmin, chmax : int
                 Channel limits within which the data will be
                 cropped.
             
             average : int
                 Width of box filter for smoothing data in channels
             
             verbose : bool
                 Talk a lot?
     """
     if isinstance(data, np.ndarray):
         if data.ndim == 1:
             self.mcadata = data.copy()
         elif data.ndim == 2:
             self.mcadata = data[:,col]
     else:
         self.mca= FIOdata(data, verbose=verbose)
         self.mcadata = self.mca[:,col]
     self.channels = np.arange(len(self.mcadata)).astype(float)
     self.ind = (self.channels>chmin) * (self.channels<chmax)
     ind2 = self.mcadata < self.bg
     self.mcadata[ind2] = self.bg
     try:
         self.Iint = self.mcadata.sum() / self.mca.parameters["SAMPLE_TIME"]
         #print "Integral Intensity = %2f cps" %self.Iint
     except:
         self.Iint = self.mcadata.sum()
         #print "Integral Intensity = %2f counts" %self.Iint
     self.energy = (self.channels - self.p["K0"]) / self.p["c"]
     if average:
         self.mcadata = ndimage.uniform_filter1d(self.mcadata.astype(float), average)
     if energy is None and hasattr(self, "mca"):
         try:
             self.elastic = self.mca.parameters["ENERGY"]
         except KeyError:
             print("Warning: energy of incident photons could not be retrieved.")
     else:
         self.elastic = energy
     try: self.twotheta = float(twotheta)
     except: self.twotheta = self.mca.parameters[twotheta]
     Lcompton = self.lambda_c * (1 - np.cos(np.radians(self.twotheta))) \
                + 12398./self.elastic
     self.Ecompton = 12398. / Lcompton # Energy of compton peak
Example #35
import numpy as np
import scipy.ndimage as sndi
from numpy.testing import assert_equal

def test_uniform_filter1d_roundoff_errors():
    # gh-6930
    in_ = np.repeat([0, 1, 0], [9, 9, 9])
    for filter_size in range(3, 10):
        out = sndi.uniform_filter1d(in_, filter_size)
        assert_equal(out.sum(), 10 - filter_size)
Example #36
    def calc_FoM(self, width, s_lambda=3, s_time=3, use_max=False):
        '''
        Calculate the figure of merit (FoM) of a dataset (t, x, and lambda)

        In this case our figure of merit is calculated as the _maximum_ value
        along the spectral dimension of the spatially averaged data, relative
        to its mean and standard deviation.

        Parameters
        ----------
        data : ndarray (NxMxK)
            the array over which to calculate the SNR, assumes that it
            has dimensions (time, position, spectrum)
        width : int
            the width over which to calculate the average in the spatial
            dimension
        s_lambda : float (optional)
            the width of the gaussian kernel along the spectral dimension
        s_time : float (optional)
            the width of the gaussian kernel along the time dimension
        use_max : bool (optional)
            whether to use the max projection or not, will significantly speed
            up the calculation but will raise the noise floor in the process.

        Returns
        -------
        FoM : ndarray (NxK)
            The calculated figure of merit (FoM)
        '''

        # before we make another copy we should trash the old one, if it exists
        # if we don't do this it can lead to a memory leak.
        try:
            del self.g_mean_data
        except AttributeError:
            pass

        # First calculate the moving average of the data along the spatial
        # dimension cast as float64 for better precision, this is necessary
        # for the later gaussian filters, but might as well do it now to avoid
        # making more copies of the data than necessary.

        if use_max:
            data = self.data.max(0, keepdims=True).astype(float)
        else:
            data = self.data.astype(float)

        mean_data = uniform_filter1d(data, width, axis=1)

        # calculate the gaussian blur along the spectral and time dimensions
        if s_time == 0 and s_lambda == 0:
            g_mean_data = mean_data
        else:
            g_mean_data = gaussian_filter(mean_data, (s_time, 0, s_lambda))

        g_mean_data_mean = g_mean_data.mean(axis=(0, 2))
        g_mean_data_std = g_mean_data.std(axis=(0, 2))
        g_mean_data_max = g_mean_data.max(axis=(0, 2))

        FoM = (g_mean_data_max-g_mean_data_mean)/g_mean_data_std

        self.FoM = FoM
        self.g_mean_data = g_mean_data
Example #37
    def update(self):
        self.yf = nd.uniform_filter1d(self.d[:, self.chan], self.width, mode='nearest')
        self.replot = True
Example #38
def threshold_minimum(image, nbins=256, max_iter=10000):
    """Return threshold value based on minimum method.

    The histogram of the input `image` is computed and smoothed until there are
    only two maxima. Then the minimum in between is the threshold value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    max_iter : int, optional
        Maximum number of iterations to smooth the histogram.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Raises
    ------
    RuntimeError
        If unable to find two local maxima in the histogram or if the
        smoothing takes more than 1e4 iterations.

    References
    ----------
    .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding
           algorithms," CVGIP: Graphical Models and Image Processing,
           vol. 55, pp. 532-537, 1993.
    .. [2] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell
           images", Annals of the New York Academy of Sciences 128: 1035-1053
           DOI:10.1111/j.1749-6632.1965.tb11715.x

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_minimum(image)
    >>> binary = image > thresh
    """

    def find_local_maxima_idx(hist):
        # We can't use scipy.signal.argrelmax
        # as it fails on plateaus
        maximum_idxs = list()
        direction = 1

        for i in range(hist.shape[0] - 1):
            if direction > 0:
                if hist[i + 1] < hist[i]:
                    direction = -1
                    maximum_idxs.append(i)
            else:
                if hist[i + 1] > hist[i]:
                    direction = 1

        return maximum_idxs

    hist, bin_centers = histogram(image.ravel(), nbins)

    smooth_hist = np.copy(hist).astype(np.float64)

    for counter in range(max_iter):
        smooth_hist = ndi.uniform_filter1d(smooth_hist, 3)
        maximum_idxs = find_local_maxima_idx(smooth_hist)
        if len(maximum_idxs) < 3:
            break

    if len(maximum_idxs) != 2:
        raise RuntimeError('Unable to find two maxima in histogram')
    elif counter == max_iter - 1:
        raise RuntimeError('Maximum iteration reached for histogram '
                           'smoothing')

    # Find lowest point between the maxima
    threshold_idx = np.argmin(smooth_hist[maximum_idxs[0]:maximum_idxs[1] + 1])

    return bin_centers[maximum_idxs[0] + threshold_idx]
Example #39
def data_preparation(wl, t, d, wiener=3, trunc_back=0.05, trunc_scans=0, start_det0_is_para=True,
                     do_scan_correction=True, do_iso_correction=True, plot=1, n=10):
    d = d.copy()
    #d[..., 0, :]= shift_linear_part(d[..., 0, :])
    #d[..., 1, :] = shift_linear_part(d[..., 1, :], 1, t)

    if wiener == 'svd':
        for i in range(d.shape[-1]):
            d[:, :, 0, i] = dv.svd_filter(d[:, :, 0, i], 3)
            d[:, :, 1, i] = dv.svd_filter(d[:, :, 1, i], 3)
    elif wiener > 1:
        d = sig.wiener(d, (wiener, 3, 1, 1))
    elif wiener < 0:
        d = nd.uniform_filter1d(d, -wiener, 0, mode='nearest')

    #d, back0 = back_correction(d, use_robust=1)
    #back1 = back0
    if do_scan_correction:
        d = scan_correction(d, dv.fi(t, 0.5))
    import astropy.stats as stats

    def fi(x, ax=0):
        return stats.sigma_clip(x, sigma=trunc_back, iters=3, axis=ax).mean(ax)
    back0 = fi(d[:n, ..., 0, :], ax=0)
    back1 = fi(d[:n, ..., 1, :], ax=0)
    back = 0.5*(back0+back1).mean(-1)
    d[..., 0, :] -= back.reshape(1, 32, -1)
    d[..., 1, :] -= back.reshape(1, 32, -1)

    if do_scan_correction:
        d = scan_correction(d, dv.fi(t, 0))

    # gr -> vert -> parallel zum 0. scan
    #fi = lambda x, ax=-1: np.median(x, ax)
    def fi(x, ax=-1):
        return stats.sigma_clip(x, sigma=trunc_scans, iters=2, axis=ax).mean(ax)
    if start_det0_is_para:
        para_0 = fi(d[..., 0, ::2])
        senk_0 = fi(d[..., 0, 1::2])
        para_1 = fi(d[..., 1, 1::2])
        senk_1 = fi(d[..., 1, 0::2])
    else:
        para_0 = fi(d[..., 0, 1::2])
        senk_0 = fi(d[..., 0, 0::2])
        para_1 = fi(d[..., 1, 0::2])
        senk_1 = fi(d[..., 1, 1::2])

    iso_0 = (para_0 + 2*senk_0) / 3.
    iso_1 = (para_1 + 2*senk_1) / 3.

    if do_iso_correction:
        iso_factor = calc_fac(iso_0, iso_1, dv.fi(t, 1))
    else:
        iso_factor = 1

    senk = 0.5*senk_0 + 0.5 * (iso_factor*senk_1)
    senk -= senk[:10, :].mean(0)
    para = 0.5*para_0 + 0.5 * (iso_factor*para_1)
    para -= para[:10, :].mean(0)
    iso = (2*senk + para)/3
    if plot:
        import matplotlib.pyplot as plt
        from skultrafast.plot_helpers import lbl_spec, mean_spec
        plt.figure(figsize=(12, 4))
        plt.subplot(121)
        plt.plot(wl, iso[:10, :].mean(0))
        plt.plot(wl, back0)
        plt.plot(wl, back1)
        lbl_spec()
        plt.legend(['iso_rest', 'back0', 'back1'])
        plt.subplot(122)
        mean_spec(wl, t, [iso_0, iso_factor*iso_1], (1, 100))
        mean_spec(wl, t, [para, senk], (1, 100), color='r')
        plt.legend(['iso_0', 'iso_1 * %.2f' % iso_factor])

    return iso, para, senk