Example #1
def getbackground_spline(data,spike_width):

    """ From a 1-d data array determine a background iteratively by fitting a spline
    and removing data more than a few sigma from the spline """

    # Remove the first and last element in data from the fit.
    y=np.copy(data[1:-1])
    arraysize=y.shape[0]
    x=np.arange(arraysize)

    # Iterate 4 times
    for iteration in range(4):

        # First iteration fits a linear spline with 3 knots.
        if iteration==0:
            npieces=3
        # Second iteration fits a quadratic spline with 10 knots.
        elif iteration==1:
            npieces=10
        # Third and fourth iterations fit a cubic spline with 50 and 75 knots respectively.
        elif iteration>1:
            npieces=iteration*25
        deg=min(iteration+1,3)
        
        # Size of each piece of the spline.
        psize = arraysize/npieces
        firstindex = arraysize%psize + int(psize/2)
        indices = np.trim_zeros(np.arange(firstindex,arraysize,psize))

        # Fit the spline
        thisfit = interpolate.LSQUnivariateSpline(x,y,indices,k=deg)
        
        thisfitted_data=np.asarray(thisfit(x),y.dtype)

        # Subtract the fitted spline from the data
        residual = y-thisfitted_data
        this_std = np.std(residual)

        # Flag data more than 5 sigma above the fitted spline.
        flags = residual > 5*this_std

        # Set rejected data value to the fitted value + 1sigma.
        y[flags] = thisfitted_data[flags] + this_std

    # Final iteration has knots separated by "spike_width".
    npieces = int(y.shape[0]/spike_width)
    psize = (x[-1]+1)/npieces
    firstindex = int((y.shape[0]%psize))
    indices = np.trim_zeros(np.arange(firstindex,arraysize,psize))

    # Get the final background.
    finalfit = interpolate.LSQUnivariateSpline(x,y,indices,k=3)
    thisfitted_data = np.asarray(finalfit(x),y.dtype)
    
    # Insert the original data at the beginning and end of the array.
    thisfitted_data = np.append(thisfitted_data,data[-1])
    thisfitted_data = np.insert(thisfitted_data,0,data[0])

    return(thisfitted_data)
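A minimal usage sketch for the routine above, on synthetic data (the array length, trend, and spike are illustrative; numpy and scipy.interpolate are assumed to be imported under the names the snippet uses):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(0)
x = np.arange(2000)
data = 50.0 + 0.01 * x + rng.normal(0.0, 1.0, x.size)  # slow background + noise
data[500:505] += 40.0                                  # an artificial spike

background = getbackground_spline(data, spike_width=30)
print(background.shape)             # same length as the input
print(data[502] - background[502])  # large: the spike was rejected from the fit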
Example #2
def getbackground_spline(data, spike_width):
    """ From a 1-d data array determine a background iteratively by fitting a spline
    and removing data more than a few sigma from the spline """

    # Work on a copy: basic slicing of a NumPy array returns a view, and the
    # clipping step below would otherwise modify the caller's data in place.
    y = np.copy(data)
    arraysize = y.shape[0]
    x = np.arange(arraysize)

    # Iterate 2 times
    for iteration in range(2):

        # First iteration fits a linear spline with 3 knots.
        if iteration == 0:
            npieces = 3
            deg = 1
        # Second iteration fits a cubic spline with a knot roughly every three data points.
        elif iteration == 1:
            npieces = int(arraysize / 3)
            deg = 3

        # Size of each piece of the spline.
        psize = arraysize / npieces
        firstindex = arraysize % psize + int(psize / 2)
        indices = np.trim_zeros(np.arange(firstindex, arraysize, psize))

        # Fit the spline
        thisfit = interpolate.LSQUnivariateSpline(x, y, indices, k=deg)

        thisfitted_data = np.asarray(thisfit(x), y.dtype)

        # Subtract the fitted spline from the data
        residual = y - thisfitted_data
        this_std = np.std(residual)

        # Flag data more than 5 sigma above the fitted spline.
        flags = residual > 5 * this_std

        # Set rejected data value to the fitted value + 1sigma.
        y[flags] = thisfitted_data[flags] + this_std

    # Final iteration has knots separated by "spike_width".
    npieces = int(y.shape[0] / spike_width)
    psize = (x[-1] + 1) / npieces
    firstindex = int((y.shape[0] % psize))
    indices = np.trim_zeros(np.arange(firstindex, arraysize, psize))

    # Get the final background.
    finalfit = interpolate.LSQUnivariateSpline(x, y, indices, k=3)
    thisfitted_data = np.asarray(finalfit(x), y.dtype)

    return (thisfitted_data)
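The np.copy above matters: for NumPy arrays, basic slicing such as data[:] returns a view, so the sigma-clipping step would silently overwrite the caller's array. A tiny sketch of the difference:

import numpy as np

data = np.arange(5.0)
view = data[:]
view[0] = 99.0
print(data[0])   # 99.0 -- writing through the view changed the original

safe = np.copy(data)
safe[1] = -1.0
print(data[1])   # 1.0 -- the copy left the original untouched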
Example #3
    def spline_lsqr(self, per, freq=None):
        """

        Fit a spline to the log periodogram using least-squares

        Parameters
        ----------
        per : ndarray
            periodogram
        freq : ndarray, optional
            frequencies where the periodogram is computed; if None, the
            positive Fourier frequencies for len(per) samples are used
        """

        if freq is None:
            # If the frequencies where per is computed are not given
            NI = len(per)
            if NI not in list(self.logf.keys()):
                f = np.fft.fftfreq(NI) * self.fs
                self.logf[NI] = np.log(f[f > 0])
            else:
                f = np.concatenate(([0], np.exp(self.logf[NI])))

            n = int((NI - 1) / 2)
            z = per[1:n + 1]
            v = np.log(z) - self.C0

            # Spline estimator of the log-PSD
            inds_est = np.where((self.f_min_est <= f[1:n + 1]) & (
                        f[1:n + 1] <= self.f_max_est))[0]
            spl = interpolate.LSQUnivariateSpline(self.logf[NI][inds_est],
                                                  v[inds_est],
                                                  self.logf_knots,
                                                  k=self.D,
                                                  ext=self.ext)

        else:
            # If the frequencies are given
            v = np.log(per) - self.C0
            # Spline estimator of the log-PSD
            inds_est = np.where((self.f_min_est <= freq)
                                & (freq <= self.f_max_est))[0]
            spl = interpolate.LSQUnivariateSpline(np.log(freq)[inds_est],
                                                  v[inds_est],
                                                  self.logf_knots,
                                                  k=self.D,
                                                  ext=self.ext)

        return spl
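A standalone sketch of the core operation, outside the class and with illustrative names: fit a least-squares spline to the debiased log-periodogram of white noise. The Euler-Mascheroni correction stands in for self.C0 here, which is an assumption about what C0 contains:

import numpy as np
from scipy import interpolate

fs = 1.0
n = 4096
x = np.random.default_rng(1).normal(size=n)
f = np.fft.rfftfreq(n, d=1.0 / fs)[1:]        # positive frequencies only
per = np.abs(np.fft.rfft(x))[1:] ** 2 / n     # one-sided periodogram

logf = np.log(f)
v = np.log(per) + np.euler_gamma              # E[log(chi2_2 / 2)] = -gamma

knots = np.linspace(logf[5], logf[-5], 20)    # knots strictly inside the data
spl = interpolate.LSQUnivariateSpline(logf, v, knots, k=3)
print(np.exp(spl(logf[:3])))                  # smoothed PSD estimate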
Example #4
def interp_gamma():
    """
    We will interpolate based on reasonable values of the std dev and use the exact mean.

    For the std dev, 0.001 is a very small std dev and 4 is a pretty big spread (on the log scale).

    :return: a stand in fn for interpolating the conjugate map
    """
    # get the grid we will search
    qt_lb, qt_ub = 0.0001, 4
    qt = np.exp(np.linspace(np.log(qt_lb), np.log(qt_ub), 5000))

    # do a bunch of solves
    z = np.empty(len(qt))
    for i in range(len(qt)):
        z[i] = np.log(pois_alpha_param(qt[i]**2))

    knots = np.exp(np.linspace(np.log(qt[5]), np.log(qt[-5]), 100))
    bbox = [0, qt_ub + 2]
    _interp_fn_log_alpha = interpolate.LSQUnivariateSpline(qt,
                                                           z,
                                                           knots,
                                                           bbox=bbox,
                                                           k=1)

    # transform to original scale and variance instead of std dev
    fn = partial(gamma_transformer, fn=_interp_fn_log_alpha)
    fn.ft_lb, fn.ft_ub, fn.qt_lb, fn.qt_ub = -np.inf, np.inf, bbox[0], bbox[1]
    return fn
Example #5
 def _least_squares_spline(self, v, y):
     """
     Fits y = g(v) where g is a cubic spline with the stipulated knots. 
     """
     ixs = np.argsort(v)
     _v = v[ixs]
     _y = y[ixs]
     if type(self._knots) is int:
         if self._knots > 1000:
             print('WARNING: Large number of knots requested.')
             print('Consider using fewer knots')
         knots_actual = np.percentile(_v, np.linspace(0, 100, self._knots))
         if np.abs(knots_actual[0] - _v[0]) < 1e-6:
             knots_actual = knots_actual[1:]
         if np.abs(knots_actual[-1] - _v[-1]) < 1e-6:
             knots_actual = knots_actual[:-1]
     else:
         knots_actual = self._knots
     try:
         spline = ipl.LSQUnivariateSpline(_v, _y, knots_actual, k=3)
     except ValueError as e:
         msg = "Spline fit returned error\nTry reducing the number of knots"
         print(e)
         raise RuntimeError(msg)  # StandardError existed only in Python 2
     deriv = spline.derivative()
     return (spline(v), deriv(v), spline)
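A minimal sketch of the percentile-based knot placement used above, with the same edge trimming (synthetic data; assumes only numpy and scipy):

import numpy as np
from scipy import interpolate as ipl

rng = np.random.default_rng(2)
v = np.sort(rng.uniform(0.0, 10.0, 500))
y = np.sin(v) + rng.normal(0.0, 0.1, v.size)

knots = np.percentile(v, np.linspace(0, 100, 12))
# keep interior knots only: drop those coinciding with the data extremes
knots = knots[(knots - v[0] > 1e-6) & (v[-1] - knots > 1e-6)]
spline = ipl.LSQUnivariateSpline(v, y, knots, k=3)
deriv = spline.derivative()
print(spline(v[:3]), deriv(v[:3]))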
Example #6
def spline_interpolator(tau_x, x, f, knots=None, deg=2):
    """
    returns a spline interpolator with knots uniformly spaced at tau_x over x
    
    Args:
        tau_x: the knot spacing, in the same units as x
        x: the independent ('time') variable
        f: the function values to fit (e.g. an autocorrelation)
        knots: the locations of the knots (default: uniform in x)
        deg: the degree of the spline interpolator to use; derivatives up
            to order deg-1 are continuous
    Returns:
        scipy.interpolate.LSQUnivariateSpline object, interpolating f on x
    """
    # note: with endpoint=True the linspace stop value is included in the
    # knot grid
    if (knots is None):
        step_knots = tau_x
        min_x, max_x = min(x), max(x)
        knots = np.linspace(start=min_x,
                            stop=max_x,
                            num=int(np.ceil((max_x - min_x) / step_knots)),
                            endpoint=True)
    # get the spline of the data
    spline_args = \
        dict(
            # degree is k, (k-1)th derivative is continuous
            k=deg,
            # specify the spline knots (t) uniformly in time at the
            # autocorrelation time. dont want the endpoints
            t=knots[1:-1]
            )
    return interpolate.LSQUnivariateSpline(x=x, y=f, **spline_args)
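A minimal usage sketch on synthetic data (it relies on the integer cast of num above; the signal is illustrative):

import numpy as np
from scipy import interpolate

x = np.linspace(0.0, 10.0, 500)
f = np.exp(-x / 3.0) * np.cos(2.0 * np.pi * x)

spl = spline_interpolator(tau_x=0.5, x=x, f=f, deg=3)
print(spl(np.array([0.25, 5.0, 9.75])))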
Example #7
def resample_attitude(obs_mjd, exp_time, attitude):
    """Resamples table of attitude data (RA, DEC, ROLL, DROLL)
    to the midtimes of the observations of duration exp_time.
    Note: exp_time is in seconds, mjd in days;
    DROLL is the change in roll angle during an exposure.
    """
    nrows = obs_mjd.shape[0]
    outparam = np.zeros((nrows, 5))
    outparam[:, 0] = obs_mjd

    rollin = attitude[:, 3].copy()

    for n in range(1, len(rollin)):
        if rollin[n] > rollin[n - 1]:
            rollin[n:] -= 360

    dt = 0.5 * exp_time / 24 / 3600
    t0 = attitude[0, 0]
    t = attitude[:, 0] - t0
    knots = t[3:-3:5]
    spl = interpolate.LSQUnivariateSpline(t, rollin, knots)
    outparam[:, 3] = spl(obs_mjd - t0) % 360
    outparam[:, 4] = np.abs(spl(obs_mjd - t0 + dt) - spl(obs_mjd - t0 - dt))

    sample_dt = np.max(np.diff(attitude[:, 0]))
    if sample_dt > 2 * dt:
        dt = 0.5 * sample_dt
    for n in range(nrows):
        t = obs_mjd[n]
        ind = np.abs(attitude[:, 0] - t) <= dt
        outparam[n, 1] = np.mean(attitude[ind, 1])  # RA
        outparam[n, 2] = np.mean(attitude[ind, 2])  # DEC

    return outparam
Example #8
 def continuum_fit(self, knots=10, plot=False, verbose=False):
     """fits a continuum via a spline through the flux values."""
     edgeTolerance = 0.1
     remove_orders = []
     for order in self.safe_orders:
         mask = self.Orders[order]['mask']
         if np.sum(mask) < 100:
             remove_orders.append(order)
     for order in remove_orders:
         self.safe_orders.remove(order)
         print("Removing from safe_orders (under 100 useable pixels): ",
               order)
     remove_orders = []
     for order in self.safe_orders:
         mask = self.Orders[order]['mask']
         self.Orders[order]['con'] = np.zeros_like(
             self.Orders[order]['wav'])
         try:
             s = si.LSQUnivariateSpline(
                 self.Orders[order]['wav'][mask],
                 self.Orders[order]['flx'][mask],
                 np.linspace(
                     self.Orders[order]['wav'][mask][0] + edgeTolerance,
                     self.Orders[order]['wav'][mask][-1] - edgeTolerance,
                     knots),
                 w=self.Orders[order]['err'][mask])
             self.Orders[order]['con'][mask] = s(
                 self.Orders[order]['wav'][mask])
         except ValueError:  # sparse orders fail the Schoenberg-Whitney knot conditions
             remove_orders.append(order)
     for order in remove_orders:
         self.safe_orders.remove(order)
         print("Removing from safe_orders (sparse-ness): ", order)
     pass
Example #9
def iter_spline(time, flux, window_length):
    no_knots = (max(time) - min(time)) / window_length
    newflux = flux.copy()
    newtime = time.copy()
    detrended_flux = flux.copy()
    for i in range(constants.PSPLINES_MAXITER):
        mask_outliers = numpy.ma.where(
            1 - detrended_flux < constants.PSPLINES_STDEV_CUT *
            numpy.std(detrended_flux))
        newtime, newflux = cleaned_array(newtime[mask_outliers],
                                         newflux[mask_outliers])
        # knots must not be at the edges, so we take them as [1:-1]
        knots = numpy.linspace(min(newtime), max(newtime), int(no_knots))[1:-1]
        s = interpolate.LSQUnivariateSpline(newtime, newflux, knots)
        trend_segment = s(newtime)
        detrended_flux = newflux / trend_segment
        mask_outliers = numpy.ma.where(
            1 - detrended_flux > constants.PSPLINES_STDEV_CUT *
            numpy.std(detrended_flux))
        print('Iteration:', i + 1, 'Rejected outliers:', len(mask_outliers[0]))
        # Check convergence
        if len(mask_outliers[0]) == 0:
            print('Converged.')
            break
    # Final iteration applied all data interpolated over clipped values
    trend_segment = s(time)
    return trend_segment
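A self-contained sketch of the same clip-and-refit idea (constants and cleaned_array are project-specific, so they are inlined with assumed values here):

import numpy
from scipy import interpolate

rng = numpy.random.default_rng(3)
time = numpy.linspace(0.0, 30.0, 3000)
flux = 1.0 + 0.01 * numpy.sin(time / 5.0) + rng.normal(0.0, 0.002, time.size)
flux[1500] = 0.9                     # one deep outlier

window_length, stdev_cut = 2.0, 3.0  # assumed stand-ins for the constants
t, f = time.copy(), flux.copy()
for _ in range(10):
    n_knots = int((t.max() - t.min()) / window_length)
    knots = numpy.linspace(t.min(), t.max(), n_knots)[1:-1]
    trend = interpolate.LSQUnivariateSpline(t, f, knots)(t)
    resid = f / trend
    keep = numpy.abs(1.0 - resid) < stdev_cut * numpy.std(resid)
    if keep.all():
        break
    t, f = t[keep], f[keep]
print(t.size, 'points kept of', time.size)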
Example #10
def fit1dleg(line, order=3, low=4, high=4, niter=3):
    low, high = float(low), float(high)

    poly = [special.legendre(o) for o in range(order)]
    coeffs = [1.0 for p in poly]

    x = np.arange(line.shape[0])
    xSub = x.copy().astype('float')
    ySub = line.copy()

    #fitfunc = lambda coeff, x: np.sum([p*pol(x) for p,pol in zip(coeff, poly)])
    #errfunc = lambda coeff, x, y: fitfunc(coeff, x) - y
    while niter > 0:
        #coeffs, success = optimize.leastsq(errfunc, coeffs, args=(xSub, ySub))
        #ySub = fitfunc(coeffs,xSub) - line
        spl = interpolate.LSQUnivariateSpline(
            xSub, ySub,
            np.arange(0, xSub.shape[0], int(xSub.shape[0] /
                                            (order + 1)))[:-1] +
            int(0.5 * xSub.shape[0] / (order + 1)))
        yZero = spl(xSub) - ySub
        mask = (yZero > (-low * yZero.std())) & (yZero < (high * yZero.std()))
        ySub = ySub[mask]
        if len(ySub) == len(yZero): break
        xSub = xSub[mask]
        niter -= 1

    return spl(x)
Example #11
    def _fit_gradient(self, Rcut, nr_knots):
        """
        find a curve that smoothly approximates the data in {Ri,V'rep(Ri)}

        Parameters:
        ===========
        Rcut: cutoff radius
        nr_knots: number of equidistant knots. The fewer knots, the smoother the interpolation.

        Returns:
        ========
        a callable functions that gives the gradient V'_rep(x)
        """
        R = array(self.nuclear_separation)
        dVrep = array(self.repulsive_gradient)
        weights = array(self.weights)
        sort_indx = argsort(R)
        print "weights = "
        print "weights.max = %s" % weights.max()
        print "weights.min = %s" % weights.min()
        print weights
        # interior knots
        # should have much less knots than data points
        nr_knots = min(nr_knots, max(1, len(R) / 2))
        # left and right knots are added automatically, compute spacing
        # to leftmost and rightmost interior knots
        dR = (R.max() - R.min()) / float(nr_knots + 1)
        knots = linspace(R.min() + dR, R.max() - dR, nr_knots)
        smoothing_spline = interpolate.LSQUnivariateSpline(
            R[sort_indx], dVrep[sort_indx], knots, w=weights[sort_indx])
        #
        from matplotlib.pyplot import plot, errorbar, show, xlabel, ylabel, legend, title
        #title("FIT to gradient of repulsive potential for %s" % self.name_atom_pair)
        xlabel(r"internuclear distance r [$\AA$]", fontsize=17)
        ylabel(r"$\frac{d V_{rep}}{dr}$ [$eV / \AA$]", fontsize=17)
        for i in range(0, self.curve_counter):
            sel_indx = where(array(self.curve_ID) == i)
            plot(    array(self.nuclear_separation)[sel_indx]*bohr_to_angs, \
                     array(self.repulsive_gradient)[sel_indx]*hartree_to_eV/bohr_to_angs, \
                     "o", label="%s" % self.curve_names[i])

        errorbar(R[sort_indx] * bohr_to_angs,
                 dVrep[sort_indx] * hartree_to_eV / bohr_to_angs,
                 1.0 / weights[sort_indx] * hartree_to_eV / bohr_to_angs,
                 ls="",
                 color="grey",
                 lw=0.5)
        Rarr = linspace(0.0, R.max(), 1000)
        print(smoothing_spline(Rarr))
        plot(Rarr * bohr_to_angs,
             smoothing_spline(Rarr) * hartree_to_eV / bohr_to_angs,
             ls="-.",
             label="fit (w/ %d knots)" % nr_knots,
             lw=2,
             color="black")
        legend(loc='lower right', fontsize=17)
        show()
        #
        return smoothing_spline
Example #12
    def fisher_plot(self, data):
        """
        Takes a time-series segment as input and returns a spline whose
        interior knots are chosen by the Fisher criterion.
        Splines of degree spline_degree (default 3) are used.
        """

        if not isinstance(data, pd.Series):
            data = pd.Series(data)

        p_val = self.p_value
        k_s = self.spline_degree
        val = []
        ind = []

        ind.append(data.index[0])
        left = data.index[0]

        while left in data.index:
            right = left + k_s + 1
            flag = True

            if right > data.index[-1]:
                flag = False
                left = right
            while flag == True:
                v = data.loc[left:right].values
                i = data.loc[left:right].index
                df = interpolate.LSQUnivariateSpline(i, v, [], k=k_s)

                if self.FF(v, df(i)) < p_val:
                    ind.append(right - 1)
                    left = right - 1
                    flag = False
                else:
                    right = right + 1
                    if right + 1 > data.index[-1]:
                        left = right + 1
                        flag = False

        spl = interpolate.LSQUnivariateSpline(data.index,
                                              data.values,
                                              ind[1:-1],
                                              k=k_s)
        return spl
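The FF method this relies on is not shown. Purely as an illustration, a chi-square-style lack-of-fit p-value could play that role; this is a hypothetical stand-in, not the project's actual criterion (small values mean a poor fit, matching the FF(...) < p_val test above):

import numpy as np
from scipy import stats

def ff_pvalue(y, y_fit):
    # Hypothetical goodness-of-fit score: small p-value => residuals are
    # large relative to the noise => a knot is needed. The noise variance
    # is estimated from first differences; degrees of freedom are approximate.
    yarr = np.asarray(y, dtype=float)
    resid = yarr - np.asarray(y_fit, dtype=float)
    sigma2 = np.median(np.diff(yarr) ** 2) / 2.0
    if sigma2 <= 0.0:
        return 1.0
    chi2 = np.sum(resid ** 2) / sigma2
    return float(stats.chi2.sf(chi2, df=len(resid)))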
Example #13
 def interDerivs(self, rho, knotList):

     # After smoothing, use this function to define the 25 points for values
     # of the derivative (for GTEDGE).
     from scipy import interpolate

     # Smooth the first derivative of the fitted spline, then refit it with
     # the supplied knot list.
     y = smooth(rho, [self.smooth.derivative(1)(a) for a in rho])
     self.derivSpline = interpolate.LSQUnivariateSpline(
         rho, [y(x) for x in rho], t=knotList, bbox=[rho[0], rho[-1]])
     # Values of the derivative at the 25 GTEDGE points.
     self.derivValues = [self.derivSpline(x) for x in self.x]
Example #14
def fit_continuum_lsq(spec,
                      knots,
                      exclude=[],
                      maxiter=3,
                      sigma_lo=2,
                      sigma_hi=2,
                      get_edges=False,
                      **kwargs):
    """ Fit least squares continuum through spectrum data using specified knots, return model """
    assert np.all(np.array(list(map(len, exclude))) == 2), exclude
    assert np.all(np.array(list(map(lambda x: x[0] < x[1], exclude)))), exclude
    x, y, w = spec.dispersion, spec.flux, spec.ivar

    # This is a mask marking good pixels
    mask = np.ones_like(x, dtype=bool)
    # Exclude regions
    for xmin, xmax in exclude:
        mask[(x >= xmin) & (x <= xmax)] = False
    # Get rid of bad fluxes
    mask[np.abs(y) < 1e-6] = False
    mask[np.isnan(y)] = False
    if get_edges:
        left = np.where(mask)[0][0]
        right = np.where(mask)[0][-1]

    for iteration in range(maxiter):
        # Make sure the knots don't hit the edges
        wmin = x[mask].min()
        wmax = x[mask].max()
        while knots[-1] >= wmax:
            knots = knots[:-1]
        while knots[0] <= wmin:
            knots = knots[1:]

        try:
            fcont = interpolate.LSQUnivariateSpline(x[mask],
                                                    y[mask],
                                                    knots,
                                                    w=w[mask],
                                                    **kwargs)
        except ValueError:
            print("Knots:", knots)
            print("xmin, xmax = {:.4f}, {:.4f}".format(wmin, wmax))
            raise
        # Iterative rejection
        cont = fcont(x)
        sig = (cont - y) * np.sqrt(w)
        sig /= np.nanstd(sig)
        mask[sig > sigma_hi] = False
        mask[sig < -sigma_lo] = False
    if get_edges:
        return fcont, left, right
    return fcont
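A small sketch of why the knot-trimming loops above are needed: an interior knot at or beyond the data range violates the Schoenberg-Whitney conditions and makes LSQUnivariateSpline raise ValueError (toy data, flat continuum):

import numpy as np
from scipy import interpolate

x = np.linspace(4000.0, 5000.0, 200)
y = np.ones_like(x)
bad_knots = np.array([4000.0, 4500.0, 5000.0])  # endpoints collide with x
try:
    interpolate.LSQUnivariateSpline(x, y, bad_knots)
except ValueError as err:
    print('rejected:', err)

good_knots = bad_knots[(bad_knots > x[0]) & (bad_knots < x[-1])]
fcont = interpolate.LSQUnivariateSpline(x, y, good_knots)
print(fcont(4250.0))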
Example #15
def find_and_add_zRange(astig_library, rough_knot_spacing=50.):
    """
    Find range about highest intensity point over which sigmax - sigmay is monotonic.
    Note that astig_library[psfIndex]['zCenter'] should contain the offset in nm to the brightest z-slice

    Parameters
    ----------
    astig_library : List
        Elements are dictionaries containing PSF fit information
    rough_knot_spacing : Float
        Smoothing is applied to (sigmax - sigmay) before finding the region over which it is monotonic. A cubic
        spline is fit to (sigmax - sigmay) using knots spaced roughly by rough_knot_spacing (in nanometers, i.e.
        the units of astig_library[ind]['z']). For convenience, knots are placed an integer number of z-steps
        apart, so the actual knot spacing is rounded accordingly.

    Returns
    -------
    astig_library : List
        The astigmatism calibration list which is taken as an input is modified in place and returned.

    """
    import scipy.interpolate as terp

    for ii in range(len(astig_library)):
        # figure out where to place knots. Note that we subsample our z-positions so we satisfy Schoenberg-Whitney
        # conditions, i.e. that our spline has adequate support
        z_steps = np.unique(astig_library[ii]['z'])
        dz_med = np.median(np.diff(z_steps))
        smoothing_factor = max(int(rough_knot_spacing / dz_med), 2)  # make sure knots are adequately supported
        knots = z_steps[1:-1:smoothing_factor]
        # make the spline
        dsig = terp.LSQUnivariateSpline(astig_library[ii]['z'], astig_library[ii]['dsigma'], knots)

        # mask where the sign is the same as the center
        zvec = np.linspace(np.min(astig_library[ii]['z']), np.max(astig_library[ii]['z']), 1000)
        sgn = np.sign(np.diff(dsig(zvec)))
        halfway = np.absolute(zvec - astig_library[ii]['zCenter']).argmin()  # len(sgn)/2
        notmask = sgn != sgn[halfway]

        # find region of dsigma which is monotonic after smoothing
        try:
            lowerZ = zvec[np.where(notmask[:halfway])[0].max()]
        except ValueError:
            lowerZ = zvec[0]
        try:
            upperZ = zvec[(halfway + np.where(notmask[halfway:])[0].min() - 1)]
        except ValueError:
            upperZ = zvec[-1]
        astig_library[ii]['zRange'] = [lowerZ, upperZ]

    return astig_library
Example #16
 def __init__(self, offset_data: numpy.ndarray, internal_knots: List[float],
              x_values: numpy.ndarray):
     self.spline_obj = scipy_interpolate.LSQUnivariateSpline(
         x=offset_data[self.KEY_RECEPTION_TIME],
         y=offset_data[self.KEY_OFFSET],
         t=internal_knots,
         bbox=[None, None],  # bounding box
         k=self._SPLINE_DEGREE)
     unstructured_data = numpy.array([x_values,
                                      self.spline_obj(x_values)]).T
     structured_data = recfunctions.unstructured_to_structured(
         unstructured_data, dtype=self.DTYPE)
     super(OffsetSpline, self).__init__(structured_data)
     self.mean = numpy.mean(self.offsets)
Example #17
def detrend(y_data, detrend_method='poly', max_order=1, n_knots=10, psd=None):
    """
    Remove a linear/polynomial trend of order max_order, or a spline trend.

    Parameters
    ----------
    y_data : 1d array
        input data of size n
    detrend_method : str
        'poly' for a polynomial fit, 'spline' for a least-squares spline
    max_order : int
        maximum order of the polynomial to fit ('poly' method)
    n_knots : int
        number of interior spline knots ('spline' method)
    psd : 1d array, optional
        noise PSD at Fourier frequencies (size n); if given, the polynomial
        fit is done by generalized least squares

    Returns
    -------
    y_detrend : 1d array
        output detrended data (size n)
    trend : 1d array
        estimated trend (size n)

    """

    t_norm = np.arange(0, y_data.shape[0])

    if detrend_method == 'poly':

        mat_linear = np.hstack(
            [np.array([t_norm**k]).T for k in range(0, max_order + 1)])
        if psd is not None:
            amp = regression.generalized_least_squares(fft(mat_linear, axis=0),
                                                       fft(y_data), psd)
        else:
            amp = regression.least_squares(mat_linear, y_data)

        trend = np.real(np.dot(mat_linear, amp))
        y_detrend = y_data - trend

    elif detrend_method == 'spline':
        # Detrending using splines
        n_seg = y_data.shape[0] // n_knots
        t_knots = np.linspace(t_norm[n_seg], t_norm[-n_seg], n_knots)
        spl = interpolate.LSQUnivariateSpline(t_norm,
                                              y_data,
                                              t_knots,
                                              k=3,
                                              ext="const")
        trend = spl(t_norm)
        y_detrend = y_data - trend

    return y_detrend, trend
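A minimal usage sketch of the 'spline' branch on synthetic data (the 'poly' branch needs the project's regression module, so it is not exercised here):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(4)
n = 2000
y = 0.001 * np.arange(n) + rng.normal(0.0, 0.5, n)   # drift + noise
y_detrend, trend = detrend(y, detrend_method='spline', n_knots=10)
print(np.std(y), '->', np.std(y_detrend))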
Example #18
def unispline(params, timeflux, etc = []):
    """
    This function fits the stellar flux using a univariate spline.

    Parameters
    ----------
    nknots      : Number of knots
    k           : degree polynomial
    time        : BJD_TDB or phase
    flux        : flux
    etc         : model flux

    Returns
    -------
    This function returns an array of flux values...

    Revisions
    ---------
    2016-04-10  Kevin Stevenson, UChicago
                [email protected]
                Original Version
    """
    nknots, k   = params
    time, flux  = timeflux
    iknots      = np.linspace(time[2], time[-3], nknots)
    splflux     = flux / etc
    
    sp          = spi.LSQUnivariateSpline(time, splflux, iknots, k=k)#, w=good)
    saplev      = sp(time)
    
    #tck = spi.bisplrep(xknots.flatten(), yknots.flatten(), ipparams, kx=3, ky=3)
    #print(tck)
    #tck = [yknots, xknots, ipparams, 3, 3]
    #func = spi.interp2d(xknots, yknots, ipparams, kind='cubic'
    #output = np.ones(time.size)
    #for i in range(time.size):
    #    output[i] = spi.bisplev(x[i], y[i], tck, dx=0, dy=0)
    
    return saplev/np.mean(saplev)
    '''
    #Splines
    ev[i].sp.nknots         = 70
    ev[i].sp.nsigma         = 8
    ev[i].sp.maxiter        = 500
    '''
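A minimal usage sketch on a synthetic light curve; etc plays the role of a model flux and is set to ones here:

import numpy as np
import scipy.interpolate as spi

time = np.linspace(0.0, 1.0, 500)
flux = (1.0 + 0.01 * np.sin(8.0 * time)
        + np.random.default_rng(5).normal(0.0, 1e-3, 500))
norm = unispline(params=(12, 3), timeflux=(time, flux), etc=np.ones_like(flux))
print(norm[:3])   # flux normalized by the spline baseline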
        
Example #19
File: psd.py, Project: qbaghi/mecm
    def spline_lsqr(self, per):
        """

        Fit a spline to the log periodogram using least-squares

        Parameters
        ----------
        per : ndarray
            periodogram
        ext : int or str, optional
            Controls the extrapolation mode for elements not in the interval
            defined by the knot sequence.
                if ext=0 or ‘extrapolate’, return the extrapolated value.
                if ext=1 or ‘zeros’, return 0
                if ext=2 or ‘raise’, raise a ValueError
                if ext=3 of ‘const’, return the boundary value
            The default value is 3.


        """

        NI = len(per)

        if NI not in list(self.logf.keys()):
            f = np.fft.fftfreq(NI)*self.fs
            self.logf[NI] = np.log(f[f > 0])
        else:
            f = np.concatenate(([0], np.exp(self.logf[NI])))

        n = int((NI - 1) / 2)
        z = per[1:n + 1]
        v = np.log(z) - self.C0

        # Spline estimator of the log-PSD
        inds_est = np.where((self.f_min_est <= f[1:n + 1])
                            & (f[1:n + 1] <= self.f_max_est))[0]
        spl = interpolate.LSQUnivariateSpline(self.logf[NI][inds_est],
                                              v[inds_est],
                                              self.logf_knots,
                                              k=self.d,
                                              ext=self.ext)

        return spl
Example #20
def smooth(inx,iny,smoothCon=None,knots=None):
    from scipy import interpolate
    x=[inx[r] for r in range(len(inx))]
    y=[iny[s] for s in range(len(iny))]
#    for a,b in zip(x,y):
#        if abs(b)<=1e-19:
#            print "value removed!"
#            x.remove(a)
#            y.remove(b)
    # Remove values greater than 2 std away
#    std=np.std(y)
#    avg=np.average(y)
#    for a,b in zip(x,y):
#        if abs(avg-b)>2.*std:
#            x.remove(a)
#            y.remove(b)

    try:
        tempArray=np.column_stack((x,y))
    except ValueError:
#        print(x)
#        print(y)
        raise RuntimeError("Crash!")  # string exceptions were Python 2 only
    # Drop rows whose y value is exactly zero (deleting inside the index loop
    # skipped elements as the array shrank).
    tempArray = tempArray[tempArray[:, 1] != 0.]
    if smoothCon is not None:
        a=[tempArray[n][0] for n in range(len(tempArray))]
        b=[tempArray[n][1] for n in range(len(tempArray))]
        spline=interpolate.UnivariateSpline(a,b,bbox=[tempArray[0][0],tempArray[-1][0]],s=smoothCon)
    elif knots is not None:
        a=[tempArray[n][0] for n in range(len(tempArray))]
        b=[tempArray[n][1] for n in range(len(tempArray))]
        spline=interpolate.LSQUnivariateSpline(a,b,bbox=[tempArray[0][0],tempArray[-1][0]],t=knots)
    else:
        a=[tempArray[n][0] for n in range(len(tempArray))]
        b=[tempArray[n][1] for n in range(len(tempArray))]
        spline=interpolate.UnivariateSpline(a,b,bbox=[tempArray[0][0],tempArray[-1][0]])
    return spline
Example #21
    def __call__(self, data):
        if not isinstance(data, pd.Series):
            data = pd.Series(data)

        if len(data.index) < (self.spline_degree + 1) * self.seg_number:
            raise DataLengthException(
                "Too short data, try to decrease seg_number")
        #spls = []
        spls_coeffs = np.array([])
        for i in range(0, self.seg_number):
            start = (len(data.index) * i) // self.seg_number
            end = (len(data.index) * (i + 1)) // self.seg_number
            spl = interpolate.LSQUnivariateSpline(data.index[start:end],
                                                  data.values[start:end],
                                                  t=[],
                                                  k=self.spline_degree)
            #spls.append(spl)
            spls_coeffs = np.concatenate((spls_coeffs, spl.get_coeffs()))
        return spls_coeffs
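A quick sketch of what this produces: with t=[] (no interior knots) each segment's least-squares spline is a single degree-k polynomial piece, so get_coeffs() returns k+1 B-spline coefficients per segment (synthetic series, assumed parameters):

import numpy as np
import pandas as pd
from scipy import interpolate

data = pd.Series(np.sin(np.linspace(0.0, 6.0, 120)))
seg_number, spline_degree = 4, 3
coeffs = np.array([])
for i in range(seg_number):
    start = (len(data.index) * i) // seg_number
    end = (len(data.index) * (i + 1)) // seg_number
    spl = interpolate.LSQUnivariateSpline(data.index[start:end],
                                          data.values[start:end],
                                          t=[], k=spline_degree)
    coeffs = np.concatenate((coeffs, spl.get_coeffs()))
print(coeffs.shape)   # (16,) = 4 segments x (3 + 1) coefficients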
Example #22
 def continuum_fit(self, knots=10, plot=False, verbose=False):
     """fits a continuum via a spline through the flux values."""
     edgeTolerance = 0.1
     for order in list(self.safe_orders):  # iterate over a copy; items may be removed
         mask = self.Orders[order]['mask']
         self.Orders[order]['con'] = np.zeros_like(
             self.Orders[order]['wav'])
         if len(self.Orders[order]['wav'][mask]) < 10:
             self.safe_orders.remove(order)
             print "Removing from safe_orders: ", order
             continue
         s = si.LSQUnivariateSpline(
             self.Orders[order]['wav'][mask],
             self.Orders[order]['flx'][mask],
             np.linspace(
                 self.Orders[order]['wav'][mask][0] + edgeTolerance,
                 self.Orders[order]['wav'][mask][-1] - edgeTolerance,
                 knots),
             w=self.Orders[order]['err'][mask])
         self.Orders[order]['con'][mask] = s(
             self.Orders[order]['wav']
             [mask])  # new array is made -- continuum
     pass
Example #23
    def _spline(self, data):

        spike_width = self.smoothing
        x = np.arange(data.shape[0])
        # Final iteration has knots separated by "spike_width".
        npieces = int(data.shape[0] / spike_width)
        psize = (x[-1] + 1) / npieces
        firstindex = int((data.shape[0] % psize))
        indices = np.trim_zeros(np.arange(firstindex, data.shape[0], psize))

        # remove knots that fall on masked samples
        indices = [index for index in indices if not data.mask[int(index)]]
        # Get the final background.
        finalfit = interpolate.LSQUnivariateSpline(x,
                                                   data.data,
                                                   indices,
                                                   k=3,
                                                   w=(~data.mask).astype(float))
        background = np.asarray(finalfit(x), data.dtype)

        return background
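A standalone sketch of the masked-array weighting trick above: w = ~mask gives bad pixels zero weight, so they do not pull on the least-squares fit (toy data):

import numpy as np
from scipy import interpolate

x = np.arange(100.0)
y = np.sin(x / 15.0)
y[40:45] = 50.0                     # corrupted pixels
mask = np.zeros_like(y, dtype=bool)
mask[40:45] = True

knots = x[10:-10:20]
fit = interpolate.LSQUnivariateSpline(x, y, knots, k=3,
                                      w=(~mask).astype(float))
print(fit(42.0))   # close to sin(42/15), not to the corrupted value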
Example #24
def smooth_spec(x, y, v, s=None, w=15, sfunc='sp', order=2, verbose=False):
    """Smooth a given spectrum."""
    if sfunc == 'sp':
        if s is None:
            sp = interpolate.LSQUnivariateSpline(x, y, t=(x[::12])[1:], w=1 / (S.sqrt(v)))
        else:
            if isinstance(s, list):
                s = s[0]
            if s <= 1:
                s *= len(x)
            sp = interpolate.UnivariateSpline(x, y, w=1 / (S.sqrt(v)), s=s)
        ysmooth = sp(x)
    elif sfunc == 'sg':
        kernel = (int(w) * 2) + 1
        if kernel <= order + 2:
            if verbose:
                print "<smooth_spec> WARNING: w  not > order+2 "\
                      "(%d <= %d+2). Replaced it by first odd number "\
                      "above order+2" % (kernel, order)
            kernel = int(order / 2) * 2 + 3
        ysmooth = sg(y, kernel=kernel, order=order)
    return ysmooth
Example #25
def do(fields, helper):
    """Actually do the CurveOperationPlugin calculation."""
    op = fields['operation'].lower()
    xtol = fields['tolerance']
    # get the input dataset - helper provides methods for getting other
    # datasets from Veusz
    ay = np.array(helper.getDataset(fields['ay']).data)
    ax = np.array(helper.getDataset(fields['ax']).data)
    N = len(ax)
    if len(ay) != N:
        raise plugins.DatasetPluginException(
            'Curve A X,Y datasets must have same length')
    by = np.array(helper.getDataset(fields['by']).data)
    bx = np.array(helper.getDataset(fields['bx']).data)
    Nb = len(bx)
    if len(by) != Nb:
        raise plugins.DatasetPluginException(
            'Curve B X,Y datasets must have same length')
    error = 0

    # Relativize
    if fields['relative']:
        d = by[0] - ay[0]
        by -= d
        logging.debug('relative correction', d)

    # If the two curves share the same X dataset, directly operate
    if fields['bx'] == fields['ax']:
        out = numexpr.evaluate(op, local_dict={'a': ay, 'b': by})
        return out, 0

    # Smooth x data
    if fields['smooth']:
        ax = utils.smooth(ax)
        bx = utils.smooth(bx)

    # Rectify x datas so they can be used for interpolation
    if xtol > 0:
        rax, dax, erra = utils.rectify(ax)
        rbx, dbx, errb = utils.rectify(bx)
        logging.debug('rectification errors', erra, errb)
        if erra > xtol or errb > xtol:
            raise plugins.DatasetPluginException(
                'X Datasets are not comparable in the required tolerance.')
    # TODO: manage extrapolation!
    # Get rectified B(x) spline for B(y)
    logging.debug('rbx', rbx[-1] - bx[-1], rbx)
    logging.debug('by', by)
    N = len(rbx)
    margin = 1 + int(N / 10)
    step = 2 + int((N - 2 * margin) / 100)
    logging.debug( 'interpolating', len(rbx), len(by), margin, step)
    bsp = interpolate.LSQUnivariateSpline(rbx, by, rbx[margin:-margin:step]) #ext='const' scipy>=0.15
    error = bsp.get_residual()
    # Evaluate B(y) spline with rectified A(x) array
    b = bsp(rax)
    logging.debug('rax', rax[-1] - ax[-1], rax)
    logging.debug('a', ay)
    logging.debug('b', b, b[1000:1010])
#   np.save('/tmp/misura/rbx',rbx)
#   np.save('/tmp/misura/by',by)
#   np.save('/tmp/misura/rax',rax)
#   np.save('/tmp/misura/ay',ay)
    # Perform the operation using numexpr
    out = numexpr.evaluate(op, local_dict={'a': ay, 'b': b})
    logging.debug('out', out)
    return out, error
Example #26
    def estimate(self, freq, per):
        """
        Estimate the spline coefficients from the periodogram 
        or cross-periodogram.

        Parameters
        ----------
        freq : ndarray
            frequency vector
        per : ndarray
            periodogram computed at frequencies freq
        Notes
        -----
        If self.cross is True, per is assumed to be a complex
        cross-periodogram, and its real and imaginary parts are fitted
        separately.
        """
        if not self.cross:
            # If the frequencies are given
            v = np.log(per.real) - self.c0
            # Spline estimator of the log-PSD
            self.log_psd_fn = interpolate.LSQUnivariateSpline(np.log(freq),
                                                              v,
                                                              self.logf_knots,
                                                              k=self.d,
                                                              ext=self.ext)
            # self.log_psd_fn = MyLSQUnivariateSpline(np.log(freq), v,
            #                                         self.logf_knots,
            #                                         k=self.d,
            #                                         ext=self.ext)
            # Save the values of the log-PSD at the frequency knots
            self.logs_knots = self.log_psd_fn(self.logf_knots)
            
        else:
            # # If the frequencies are given
            # v_amp = np.log(np.abs(per)) - self.c0
            # v_ang = np.angle(per)
            # # Spline estimator of the log-PSD
            # spl_amp = interpolate.LSQUnivariateSpline(np.log(freq),
            #                                           v_amp,
            #                                           self.logf_knots,
            #                                           k=self.d,
            #                                           ext=self.ext)

            # spl_ang = interpolate.LSQUnivariateSpline(freq,
            #                                           v_ang,
            #                                           self.f_knots,
            #                                           k=self.d,
            #                                           ext=self.ext)
            # self.log_psd_fn = lambda x: spl_amp(x) + 1j * spl_ang(np.exp(x))
            # self.psd_fn = lambda x: np.exp(spl_amp(np.log(x)) + 1j * spl_ang(x))
            
            # Spline estimator of real part of the cross-spectrum
            spl_real = interpolate.LSQUnivariateSpline(freq,
                                                       per.real,
                                                       self.f_knots,
                                                       k=self.d,
                                                       ext=self.ext)
            # Spline estimator of the imaginary part of the cross-spectrum
            spl_imag = interpolate.LSQUnivariateSpline(freq,
                                                       per.imag,
                                                       self.f_knots,
                                                       k=self.d,
                                                       ext=self.ext)
            self.psd_fn = lambda x: spl_real(x) + 1j * spl_imag(x)
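A compact sketch of the cross-spectrum branch on toy data: two real-valued splines are fitted and recombined into a complex-valued function (knot placement and the ext value are illustrative):

import numpy as np
from scipy import interpolate

freq = np.linspace(0.01, 1.0, 300)
per = np.exp(-freq) + 1j * 0.1 * np.sin(6.0 * freq)   # toy cross-periodogram
f_knots = freq[10:-10:30]
spl_real = interpolate.LSQUnivariateSpline(freq, per.real, f_knots, k=3, ext=3)
spl_imag = interpolate.LSQUnivariateSpline(freq, per.imag, f_knots, k=3, ext=3)
psd_fn = lambda x: spl_real(x) + 1j * spl_imag(x)
print(psd_fn(np.array([0.1, 0.5])))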
Example #27
    model = GeneratedModel(sigmad=given['sigmad'])

    # Choose estimation problem coarse time grid and prepare measurements
    subdivide = 2
    t = np.linspace(tmeas[0], tmeas[-1], subdivide * (nmeas - 1) + 1)
    y = np.ma.masked_all((t.size, 1))
    y[::subdivide] = meas[:, 1:]

    # Create estimation problem
    problem = jme.Problem(model, t, y, u)
    tc = problem.tc

    # Create LSQ spline approximation of the measurements for the initial guess
    Tknot = 1.05
    knots = np.arange(tmeas[0] + 2 * Tknot, tmeas[-1] - 2 * Tknot, Tknot)
    z_guess = interpolate.LSQUnivariateSpline(tmeas, meas[:, 1], knots, k=5)

    # Make Linear Least Squares approximation of the parameters
    psi = np.c_[-z_guess(tmeas)**3, -z_guess(tmeas), -z_guess(tmeas, 1)]
    (a, b, d), *_ = np.linalg.lstsq(psi, z_guess(tmeas, 2), rcond=None)
    meas_std = np.std(y.flatten() - z_guess(t))

    # Set bounds
    lower = {'meas_std': 1e-3}
    constr_bounds = np.zeros((2, problem.ncons))
    dec_bounds = np.repeat([[-np.inf], [np.inf]], problem.ndec, axis=-1)
    dec_L, dec_U = dec_bounds
    for k, v in lower.items():
        problem.set_decision_item(k, v, dec_L)

    # Set initial guess
Example #28
File: result.py, Project: IOMRC/piv
    def ds(self):
        if self._ds is None:
            file_exists = os.path.exists(self._result_file)

            reprocess = not file_exists or self._reprocess

            if reprocess:
                if file_exists:
                    print('Old file exists ' + self._result_file)
                    #print('Removing old file ' + self._result_file)
                    #shutil.rmtree(self._result_file)

                ds_data = OrderedDict()

                to_seconds = np.vectorize(
                    lambda x: x.seconds + x.microseconds / 1E6)

                print('Processing binary data...')
                xx, yy, zz = self._loadgrid()
                if xx is None:
                    if self._from_nc:
                        print('Processing existing netcdf...')
                        fn = self._result_file[:-5] + '_QC_raw.nc'
                        if os.path.exists(fn):
                            ds_temp = xr.open_dataset(self._result_file[:-5] +
                                                      '_QC_raw.nc',
                                                      chunks={'time': 50})
                            u = da.transpose(ds_temp['U'].data,
                                             axes=[3, 0, 1, 2])
                            v = da.transpose(ds_temp['V'].data,
                                             axes=[3, 0, 1, 2])
                            w = da.transpose(ds_temp['W'].data,
                                             axes=[3, 0, 1, 2])
                            tt = ds_temp['time']
                            te = (tt - tt[0]) / np.timedelta64(1, 's')
                            xx = ds_temp['x'].values
                            yy = ds_temp['y'].values
                            zz = ds_temp['z'].values
                        else:
                            print('USING OLD ZARR DATA')
                            ds_temp = xr.open_zarr(self._result_file)
                            u = da.transpose(ds_temp['U'].data,
                                             axes=[3, 0, 1, 2])
                            v = da.transpose(ds_temp['V'].data,
                                             axes=[3, 0, 1, 2])
                            w = da.transpose(ds_temp['W'].data,
                                             axes=[3, 0, 1, 2])
                            tt = ds_temp['time']
                            te = (tt - tt[0]) / np.timedelta64(1, 's')
                            xx = ds_temp['x'].values
                            yy = ds_temp['y'].values
                            zz = ds_temp['z'].values
                            print('ERROR: No NetCDF data found for ' +
                                  self._xml_file)
                            #return None
                            # print(u.shape)

                else:
                    tt, uvw = self._loaddata(xx, yy, zz)
                    if tt is None:
                        print('ERROR: No binary data found for ' +
                              self._xml_file)
                        return None

                    # calculate the elapsed time from the Timestamp objects and then convert to datetime64 datatype
                    te = to_seconds(tt - tt[0])
                    tt = pd.to_datetime(tt)
                    uvw = uvw.persist()
                    u = uvw[:, :, :, :, 0]
                    v = uvw[:, :, :, :, 1]
                    w = uvw[:, :, :, :, 2]


#                    u = xr.DataArray(uvw[:,:,:,:,0], coords=[tt, xx, yy, zz], dims=['time','x', 'y', 'z'],
#                                     name='U', attrs={'standard_name': 'sea_water_x_velocity', 'units': 'm s-1'})
#                    v = xr.DataArray(uvw[:,:,:,:,1], coords=[tt, xx, yy, zz], dims=['time', 'x', 'y', 'z'],
#                                     name='V', attrs={'standard_name': 'sea_water_x_velocity', 'units': 'm s-1'})
#                    w = xr.DataArray(uvw[:,:,:,:,2], coords=[tt, xx, yy, zz], dims=['time', 'x', 'y', 'z'],
#                                     name='W', attrs={'standard_name': 'upward_sea_water_velocity', 'units': 'm s-1'})

                if xx is None:
                    print('No data found')
                    return None

                u = u.persist()
                v = v.persist()
                w = w.persist()

                dx = float(xx[1] - xx[0])
                dy = float(yy[1] - yy[0])
                dz = float(zz[1] - zz[0])

                if self._norm_dims:
                    exp = self._result_root.split('/')[4]
                    runSheet = pd.read_csv('~/RunSheet-%s.csv' % exp)
                    runSheet = runSheet.set_index('RunID')
                    runDetails = runSheet.loc[int(self.run_id[-2:])]

                    T = runDetails['T (s)']
                    h = runDetails['h (m)']
                    D = runDetails['D (m)']

                    ww = te / T
                    om = 2. * np.pi / T
                    d_s = (2. * 1E-6 / om)**0.5
                    bl = 3. * np.pi / 4. * d_s

                    if exp == 'Exp6':
                        if D == 0.1:
                            dy_c = (188. + 82.) / 2
                            dx_c = 39.25
                            cx = dx_c / 1000.
                            cy = dy_c / 1000.
                        else:
                            dy_c = (806. + 287.) / 2. * 0.22
                            dx_c = 113 * 0.22
                            cx = dx_c / 1000.
                            cy = dy_c / 1000.
                    elif exp == 'Exp8':
                        dy_c = 624 * 0.22
                        dx_c = 15
                        cx = dx_c / 1000.
                        cy = dy_c / 1000.
                    xn = (xx + (D / 2. - cx)) / D
                    yn = (yy - cy) / D
                    zn = zz / h

                    xnm, ynm = np.meshgrid(xn, yn)
                    rr = np.sqrt(xnm**2. + ynm**2)
                    cylMask = rr < 0.5

                    nanPlane = np.ones(cylMask.shape)
                    nanPlane[cylMask] = np.nan
                    nanPlane = nanPlane.T
                    nanPlane = nanPlane[np.newaxis, :, :, np.newaxis]

                    u = u * nanPlane
                    v = v * nanPlane
                    w = w * nanPlane

                    if D == 0.1:
                        xInds = xn > 3.
                    else:
                        xInds = xn > 2.

                    blInd = np.argmax(zn > bl / h)
                    blPlane = int(round(blInd))

                    Ue = u[:, xInds, :, :]
                    Ue_bar = da.nanmean(Ue, axis=(1, 2, 3)).compute()
                    Ue_bl = da.nanmean(Ue[:, :, :, blPlane],
                                       axis=(1, 2)).compute()

                    inds = ~np.isnan(Ue_bl)

                    xv = ww[inds] % 1.
                    xv = xv + np.random.normal(scale=1E-6, size=xv.shape)
                    yv = Ue_bl[inds]
                    xy = np.stack([
                        np.concatenate([xv - 1., xv, xv + 1.]),
                        np.concatenate([yv, yv, yv])
                    ]).T
                    xy = xy[xy[:, 0].argsort(), :]
                    xi = np.linspace(-0.5, 1.5, len(xv) // 8)
                    n = np.nanmax(xy[:, 1])
                    # print(n)
                    # fig,ax = pl.subplots()
                    # ax.scatter(xy[:,0],xy[:,1]/n)
                    # print(xy)
                    spl = si.LSQUnivariateSpline(xy[:, 0],
                                                 xy[:, 1] / n,
                                                 t=xi,
                                                 k=3)
                    roots = spl.roots()
                    der = spl.derivative()
                    slope = der(roots)
                    inds = np.min(np.where(slope > 0))
                    dt = (roots[inds] % 1.).mean() - 0.5

                    tpx = np.arange(0, 0.5, 0.001)
                    U0_bl = np.abs(spl(tpx + dt).min() * n)
                    ws = ww - dt
                    Ue_spl = spl((ws - 0.5) % 1.0 + dt) * n * -1.0

                    #maxima = spl.derivative().roots()
                    #Umax = spl(maxima)
                    #UminIdx = np.argmin(Umax)
                    #U0_bl = np.abs(Umax[UminIdx]*n)

                    #ww_at_min = maxima[UminIdx]
                    #ws = ww - ww_at_min + 0.25

                    inds = ~np.isnan(Ue_bar)

                    xv = ww[inds] % 1.
                    xv = xv + np.random.normal(scale=1E-6, size=xv.shape)
                    yv = Ue_bar[inds]
                    xy = np.stack([
                        np.concatenate([xv - 1., xv, xv + 1.]),
                        np.concatenate([yv, yv, yv])
                    ]).T
                    xy = xy[xy[:, 0].argsort(), :]
                    xi = np.linspace(-0.5, 1.5, len(xv) // 8)
                    n = np.nanmax(xy[:, 1])
                    spl = si.LSQUnivariateSpline(xy[:, 0],
                                                 xy[:, 1] / n,
                                                 t=xi,
                                                 k=4)
                    maxima = spl.derivative().roots()
                    Umax = spl(maxima)
                    UminIdx = np.argmin(Umax)
                    U0_bar = np.abs(Umax[UminIdx] * n)

                    ww = xr.DataArray(ww, coords=[
                        tt,
                    ], dims=[
                        'time',
                    ])
                    ws = xr.DataArray(ws - 0.5, coords=[
                        tt,
                    ], dims=[
                        'time',
                    ])

                    xn = xr.DataArray(xn, coords=[
                        xx,
                    ], dims=[
                        'x',
                    ])
                    yn = xr.DataArray(yn, coords=[
                        yy,
                    ], dims=[
                        'y',
                    ])
                    zn = xr.DataArray(zn, coords=[
                        zz,
                    ], dims=[
                        'z',
                    ])

                    Ue_bar = xr.DataArray(Ue_bar,
                                          coords=[
                                              tt,
                                          ],
                                          dims=[
                                              'time',
                                          ])
                    Ue_bl = xr.DataArray(Ue_bl, coords=[
                        tt,
                    ], dims=[
                        'time',
                    ])
                    Ue_spl = xr.DataArray(Ue_spl,
                                          coords=[
                                              tt,
                                          ],
                                          dims=[
                                              'time',
                                          ])

                    ds_data['ww'] = ww
                    ds_data['ws'] = ws

                    ds_data['xn'] = xn
                    ds_data['yn'] = yn
                    ds_data['zn'] = zn

                    ds_data['Ue_bar'] = Ue_bar
                    ds_data['Ue_bl'] = Ue_bl
                    ds_data['Ue_spl'] = Ue_spl

                te = xr.DataArray(te, coords=[
                    tt,
                ], dims=[
                    'time',
                ])

                dims = ['time', 'x', 'y', 'z']
                coords = [tt, xx, yy, zz]

                ds_data['U'] = xr.DataArray(u,
                                            coords=coords,
                                            dims=dims,
                                            name='U',
                                            attrs={
                                                'standard_name':
                                                'sea_water_x_velocity',
                                                'units': 'm s-1'
                                            })
                ds_data['V'] = xr.DataArray(v,
                                            coords=coords,
                                            dims=dims,
                                            name='V',
                                            attrs={
                                                'standard_name':
                                                'sea_water_x_velocity',
                                                'units': 'm s-1'
                                            })
                ds_data['W'] = xr.DataArray(w,
                                            coords=coords,
                                            dims=dims,
                                            name='W',
                                            attrs={
                                                'standard_name':
                                                'sea_water_x_velocity',
                                                'units': 'm s-1'
                                            })
                ds_data['te'] = te

                # stdV = da.nanstd(v)
                # stdW = da.nanstd(w)
                # thres=7.
                if 'U0_bl' in locals():
                    condition = (da.fabs(v) / U0_bl >
                                 1.5) | (da.fabs(w) / U0_bl > 0.6)
                    for var in ['U', 'V', 'W']:
                        ds_data[var].data = da.where(condition, np.nan,
                                                     ds_data[var].data)

                piv_step_frame = float(
                    self._xml_root.findall('piv/stepFrame')[0].text)

                print('Calculating tensor')
                # j = jacobianConv(ds.U, ds.V, ds.W, dx, dy, dz, sigma=1.5)
                j = jacobianDask(u, v, w, piv_step_frame, dx, dy, dz)
                print('Done')
                #j = da.from_array(j,chunks=(20,-1,-1,-1,-1,-1))

                #                j = jacobianDask(uvw[:,:,:,:,0],uvw[:,:,:,:,1], uvw[:,:,:,:,2], piv_step_frame, dx, dy, dz)
                jT = da.transpose(j, axes=[0, 1, 2, 3, 5, 4])

                #                j = j.persist()
                #                jT = jT.persist()

                jacobianNorm = da.sqrt(
                    da.nansum(da.nansum(j**2., axis=-1), axis=-1))

                strainTensor = (j + jT) / 2.
                vorticityTensor = (j - jT) / 2.

                strainTensorNorm = da.sqrt(
                    da.nansum(da.nansum(strainTensor**2., axis=-1), axis=-1))
                vorticityTensorNorm = da.sqrt(
                    da.nansum(da.nansum(vorticityTensor**2., axis=-1),
                              axis=-1))
                divergence = j[:, :, :, :, 0, 0] + j[:, :, :, :, 1,
                                                     1] + j[:, :, :, :, 2, 2]
                # print(divergence)
                omx = vorticityTensor[:, :, :, :, 2, 1] * 2.
                omy = vorticityTensor[:, :, :, :, 0, 2] * 2.
                omz = vorticityTensor[:, :, :, :, 1, 0] * 2.

                divNorm = divergence / jacobianNorm

                #                divNorm = divNorm.persist()

                #                divNorm_mean = da.nanmean(divNorm)
                #                divNorm_std = da.nanstd(divNorm)

                dims = ['x', 'y', 'z']
                comp = ['u', 'v', 'w']

                ds_data['jacobian'] = xr.DataArray(
                    j,
                    coords=[tt, xx, yy, zz, comp, dims],
                    dims=['time', 'x', 'y', 'z', 'comp', 'dims'],
                    name='jacobian')

                ds_data['jacobianNorm'] = xr.DataArray(
                    jacobianNorm,
                    coords=[tt, xx, yy, zz],
                    dims=['time', 'x', 'y', 'z'],
                    name='jacobianNorm')

                ds_data['strainTensor'] = xr.DataArray(
                    strainTensor,
                    coords=[tt, xx, yy, zz, comp, dims],
                    dims=['time', 'x', 'y', 'z', 'comp', 'dims'],
                    name='strainTensor')

                ds_data['vorticityTensor'] = xr.DataArray(
                    vorticityTensor,
                    coords=[tt, xx, yy, zz, comp, dims],
                    dims=['time', 'x', 'y', 'z', 'comp', 'dims'],
                    name='vorticityTensor')

                ds_data['vorticityNorm'] = xr.DataArray(
                    vorticityTensorNorm,
                    coords=[tt, xx, yy, zz],
                    dims=['time', 'x', 'y', 'z'],
                    name='vorticityNorm')

                ds_data['strainNorm'] = xr.DataArray(
                    strainTensorNorm,
                    coords=[tt, xx, yy, zz],
                    dims=['time', 'x', 'y', 'z'],
                    name='strainNorm')

                ds_data['divergence'] = xr.DataArray(
                    divergence,
                    coords=[tt, xx, yy, zz],
                    dims=['time', 'x', 'y', 'z'],
                    name='divergence')

                ds_data['omx'] = xr.DataArray(omx,
                                              coords=[tt, xx, yy, zz],
                                              dims=['time', 'x', 'y', 'z'],
                                              name='omx')

                ds_data['omy'] = xr.DataArray(omy,
                                              coords=[tt, xx, yy, zz],
                                              dims=['time', 'x', 'y', 'z'],
                                              name='omy')

                ds_data['omz'] = xr.DataArray(omz,
                                              coords=[tt, xx, yy, zz],
                                              dims=['time', 'x', 'y', 'z'],
                                              name='omz')

                ds_data['divNorm'] = xr.DataArray(divNorm,
                                                  coords=[tt, xx, yy, zz],
                                                  dims=['time', 'x', 'y', 'z'],
                                                  name='divNorm')

                ds = xr.Dataset(ds_data)

                self._append_CF_attrs(ds)
                self._append_attrs(ds)
                ds.attrs['filename'] = self._result_file

                if self._norm_dims:

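                    # Dimensionless groups for the oscillatory flow: KC is the
                    # Keulegan-Carpenter number; delta is a normalized
                    # Stokes-layer thickness and S = delta / KC their ratio.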
                    KC = U0_bl * T / D
                    delta = (2. * np.pi * d_s) / h
                    S = delta / KC

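                    # Bulk parameters of the run; the hard-coded 1E-6 m^2/s is
                    # the kinematic viscosity of water.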
                    ds.attrs['T'] = T
                    ds.attrs['h'] = h
                    ds.attrs['D'] = D
                    ds.attrs['U0_bl'] = U0_bl
                    ds.attrs['U0_bar'] = U0_bar
                    ds.attrs['KC'] = KC
                    ds.attrs['S'] = S
                    ds.attrs['Delta+'] = ((1E-6 * T)**0.5) / h
                    ds.attrs['Delta_l'] = 2 * np.pi * d_s
                    ds.attrs['Delta_s'] = d_s
                    ds.attrs['Re_D'] = U0_bl * D / 1E-6
                    ds.attrs['Beta'] = D**2. / (1E-6 * T)

                # Geometric-mean vector spacing in physical units (delta_grid)
                # and in pixels (dpx); their ratio delta_px enters the
                # divergence-based error estimate below. Renamed from `delta`
                # to avoid shadowing the Stokes-layer thickness above.
                delta_grid = (ds.attrs['dx'] * ds.attrs['dy'] *
                              ds.attrs['dz'])**(1. / 3.)
                dpx = (ds.attrs['pdx'] * ds.attrs['pdy'] *
                       ds.attrs['pdz'])**(1. / 3.)
                delta_px = delta_grid / dpx
                dt = ds.attrs['piv_step_ensemble']

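                # Divergence-based uncertainty estimates: the flow is
                # incompressible, so any measured divergence reflects random
                # error. Its RMS (times dt) bounds the velocity error, and
                # comparing it against the 99th-percentile vorticity-tensor
                # norm gives a relative vorticity error.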
                divRMS = da.sqrt(da.nanmean((divergence * dt)**2.))
                velocityError = divRMS / ((3. / (2. * delta_px**2.))**0.5)
                vorticityError = divRMS / dt / np.percentile(
                    vorticityTensorNorm.ravel(), 99.)

                velocityError, vorticityError = da.compute(
                    velocityError, vorticityError)

                ds.attrs['velocityError'] = velocityError
                ds.attrs['vorticityError'] = vorticityError

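                # RMS of the cross-stream (V) and vertical (W) velocities,
                # restricted to the interior of the domain when normalized
                # coordinates are available.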
                if self._norm_dims:
                    xInds = (xn > 0.5) & (xn < 2.65)
                    yInds = (yn > -0.75) & (yn < 0.75)
                else:
                    xInds = range(len(ds['x']))
                    yInds = range(len(ds['y']))
                vrms = (ds['V'][:, xInds, yInds, :]**2.).mean(
                    dim=['time', 'x', 'y', 'z'])**0.5
                wrms = (ds['W'][:, xInds, yInds, :]**2.).mean(
                    dim=['time', 'x', 'y', 'z'])**0.5
                ds.attrs['Vrms'] = float(vrms.compute())
                ds.attrs['Wrms'] = float(wrms.compute())

                ds.to_zarr(self._result_file, mode='w')

                print('Cached ' + self._result_file)

                ds = xr.open_zarr(self._result_file)
                ds.attrs['filename'] = self._result_file
            else:
                ds = xr.open_zarr(self._result_file)
                ds.attrs['filename'] = self._result_file

            self._ds = ds

        return self._ds
Ejemplo n.º 29
0
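# Assumed imports for this snippet (not shown in the source):
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
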
print("spline interpolation in 1-d object oriented:")
x = np.arange(0, 2 * np.pi + np.pi / 4, 2 * np.pi / 8)
y = np.sin(x)
s = interpolate.InterpolatedUnivariateSpline(x, y)
xnew = np.arange(0, 2 * np.pi, np.pi / 50)
ynew = s(xnew)

plt.figure()
plt.plot(x, y, 'x', xnew, ynew, xnew, np.sin(xnew), x, y, 'b')
plt.legend(['Data', 'InterpolatedUnivariateSpline', 'True', 'Linear'])
plt.axis([-0.05, 6.33, -1.05, 1.05])
plt.title('InterpolatedUnivariateSpline')
plt.show()

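# Interior knots placed just either side of the extrema of sin(x), where the
# least-squares spline needs the most flexibility.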
t = [np.pi / 2 - .1, np.pi / 2 + .1, 3 * np.pi / 2 - .1, 3 * np.pi / 2 + .1]
s = interpolate.LSQUnivariateSpline(x, y, t, k=2)
ynew = s(xnew)

plt.figure()
plt.plot(x, y, 'x', xnew, ynew, xnew, np.sin(xnew), x, y, 'b')
plt.legend(['Data', 'LSQUnivariateSpline', 'True', 'Linear'])
plt.axis([-0.05, 6.33, -1.05, 1.05])
plt.title('Spline with Specified Interior Knots')
plt.show()

print("spline interpolation in 2-d procedural (bisplrep):")
x, y = np.mgrid[-1:1:20j, -1:1:20j]
z = (x + y) * np.exp(-6.0 * (x * x + y * y))

plt.figure()
plt.pcolor(x, y, z)
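
# The source snippet is truncated here. A minimal completion of the announced
# bisplrep workflow (an assumption, following the usual SciPy pattern) fits a
# smooth 2-d spline and evaluates it on a finer grid:
plt.colorbar()
plt.title('Sparsely sampled function')
plt.show()

# Fit a bivariate B-spline representation and evaluate it on a 70x70 grid.
xnew, ynew = np.mgrid[-1:1:70j, -1:1:70j]
tck = interpolate.bisplrep(x, y, z, s=0)
znew = interpolate.bisplev(xnew[:, 0], ynew[0, :], tck)

plt.figure()
plt.pcolor(xnew, ynew, znew)
plt.colorbar()
plt.title('Interpolated function')
plt.show()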
Ejemplo n.º 30
0
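# Assumed import for this snippet (not shown in the source):
import numpy as np
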
def create_normalized_flat(images,
                           imcombinefunc=np.median,
                           colcombinefunc=np.mean,
                           pixsmooth=None):
    """
    Creates a normalized flat to apply to other DoubleSpec spectra.  Uses a
    smoothing spline.

    Parameters
    ----------
    images : list of DoubleSpecImage
        The images to combine to make the flat
    imcombinefunc : function
        The function to combine the images - should have an `axis` argument.
    colcombinefunc : function
        The function to squash the image along the spatial axis - should have
        an `axis` argument.
    pixsmooth : int or None
        The number of pixels to smooth over for any given point.  If negative
        or 0, no smoothing will happen.  If None, will use the default of
        30/10 for the red/blue side.

    Returns
    -------
    normflat : array
        An array of size matching the images containing the normalized flat -
        i.e., the thing to divide by to flatten the images.
    """
    from warnings import warn
    from scipy import interpolate

    side = images[0].side
    for im in images:
        if im.side != side:
            raise ValueError('gave images from different sides - cannot flat!')
        if 'flat' not in im.objname.lower():
            warn('Object had name {0} which may not be a flat - are you sure '
                 'images are right?'.format(im.objname))

    combflat = imcombinefunc([im.data for im in images], axis=0)

    if side == 'red':
        #100:400 removes the edge effects where no light falls
        response = colcombinefunc(combflat[100:400], axis=0)
    elif side == 'blue':
        #100:340 removes the edge effects where no light falls
        response = colcombinefunc(combflat[:, 100:340], axis=1)
    else:
        raise ValueError('unrecognized side ' + str(side))

    if pixsmooth is None:
        if side == 'red':
            pixsmooth = 30
        elif side == 'blue':
            pixsmooth = 10
        else:
            raise ValueError("pixsmooth defaults don't know side " + str(side))

    if pixsmooth > 0:
        px = np.arange(len(response))
        # Evenly spaced interior knots, roughly one every `pixsmooth` pixels;
        # integer division keeps the np.linspace count an int on Python 3.
        ks = np.linspace(px[0], px[-1], len(px) // pixsmooth + 2)[1:-1]
        response_func = interpolate.LSQUnivariateSpline(px, response, t=ks)
        response = response_func(px)

    if side == 'red':
        normflat = combflat / response
    elif side == 'blue':
        normflat = combflat / response.reshape(len(response), 1)
    else:
        raise ValueError("flatting doesn't know side " + str(side))

    return normflat