Code Example #1
    def get_shape_points(self, n):
        c = self.chord
        # Interpolate the lower and upper surfaces with monotone (PCHIP) splines.
        l_interp = PchipInterpolator(self.xl, self.yl)
        u_interp = PchipInterpolator(self.xu, self.yu)

        # Oversample 5x; the result is decimated back to n points on return.
        n = n * 5
        beta = np.linspace(0, np.pi, n)
        x = (1.0 - np.cos(beta)) / 2  # Cosine spacing clusters points near the edges.

        xl = x
        xu = x
        y_offset = np.linspace(0, (self.trailing_edge - self.init_te) / 2, n)

        yl = l_interp(xl) - y_offset
        yu = u_interp(xu) + y_offset

        if False:  # Disabled: recenter the shape on its point of maximum thickness.
            max_x = xu[np.argmax(yu)]  # 0.3
            if max_x > 0.5:
                max_x = 0.5
            max_y = np.max(yu)

            xu = xu - max_x
            xl = xl - max_x

            yu = yu - max_y
            yl = yl - max_y

        yu[0] = yl[0]  # Close the leading edge so both surfaces share the first point.

        # Decimate back to n points and scale by the chord length.
        return [[xl[::5] * c, yl[::5] * c], [xu[::5] * c, yu[::5] * c]]
Code Example #2
    def __init__(self, P, m_in, T_in, x_in, x_out, debug=False):
        print("I am still alive!")
        self.update(P, m_in, T_in, x_in, x_out)
        TT = np.linspace(self.T_in,self.Tmax,100)
        qfunc = np.vectorize(self._q)
        qq = qfunc(TT)
        if np.isnan(qq).any():
            print("There is a problem with some nans")

        # Create extended domain such that interpolate saturates at endpoints.        
        qq1 = np.resize(qq,qq.size+1)
        TT1 = np.resize(TT,TT.size+1)
        qq1[-1] = qq1[-2]
        TT1[-1] = TT1[-2] + 1
        if (np.diff(qq1) < 0).any():
            print("Warning: q(T) is not monotonic; the inverse T(q) may be unreliable.")
        self.q = PchipInterpolator(TT1,qq1,extrapolate=True)
        
        # PchipInterpolator keeps references to its inputs, so build fresh arrays.
        qq2 = np.resize(qq,qq.size+1)
        TT2 = np.resize(TT,TT.size+1)
        qq2[-1] = qq2[-2] * 1.02
        TT2[-1] = TT2[-2]
        self.T = PchipInterpolator(qq2,TT2,extrapolate=True)

        # Show that it worked
        if debug:
            print(tabulate.tabulate(zip(TT1,qq1)))
            print(tabulate.tabulate(zip(TT2,qq2)))
            import matplotlib.pyplot as plt
            plt.figure()                
            plt.plot(TT1,qq1,'.'); plt.title("qqmod vs TTmod for q()")
            plt.figure()
            plt.plot(qq2,TT2,'.'); plt.title("TTmod vs qqmod for T()")
Code Example #3
File: source.py Project: IceCubeOpenSource/ASTERIA
    def __init__(self, model, model_params=None):
        if model_params is None:
            model_params = {}
        if model == 'Nakazato_2013':
            self.model = init_snewpy_model_from_param(model, **model_params)
        else:
            self.model = init_snewpy_model(model, model_params)
        self._interp_lum = {}
        self._interp_meanE = {}
        self._interp_pinch = {}

        # Build one shape-preserving interpolant per flavor for each quantity.
        t = self.model.time
        for flavor in Flavor:
            self._interp_lum[flavor] = PchipInterpolator(
                t, self.model.luminosity[flavor], extrapolate=False)
            self._interp_meanE[flavor] = PchipInterpolator(
                t, self.model.meanE[flavor], extrapolate=False)
            self._interp_pinch[flavor] = PchipInterpolator(
                t, self.model.pinch[flavor], extrapolate=False)
Code Example #4
def cdf_and_inverse(f, a, b, dx):
    """Generate a numerical inverse CDF to the PDF given by f(x)
    f:  The probability density function whose CDF is to be numerically inverted
    a:  The start of the support for f(x)
    b:  The end of the support for f(x)
    dx: The step size to use in sampling on [a, b]    
    """
    # Sample f_X(x) on the interval [a, b] with step size dx
    sample_x = arange_inc(a, b, dx)
    sample_f = np.array([f(x) for x in sample_x])

    # Numerical integral of F using cumtrapz library function
    sample_F = np.zeros_like(sample_f)
    sample_F[1:] = cumtrapz(sample_f, sample_x)
    # Normalize this to guarantee it ranges from [0, 1] notwithstanding any round-off
    sample_F = sample_F / sample_F[-1]

    # Use the PCHIP interpolator because it guarantees that monotonic input is sent to monotonic output
    # Numerical CDF using interpolation
    F = PchipInterpolator(sample_x, sample_F)
    # Numerical inverse CDF using interpolation
    # Silence these warnings; it's OK, the splined inverse interpolant is horizontal in places but it works
    with np.errstate(divide='ignore', invalid='ignore'):
        F_inv = PchipInterpolator(sample_F, sample_x)
    # Return the splined CDF and inverse CDF function
    return F, F_inv
Code Example #5
File: gasoil.py Project: codacy-badger/pyscal
    def add_gasoil_fromtable(
        self,
        df,
        sgcolname="Sg",
        krgcolname="krg",
        krogcolname="krog",
        pccolname="pcog",
        krgcomment="",
        krogcomment="",
        pccomment="",
    ):
        """Interpolate relpermdata from a dataframe.

        The saturation range with endpoints must be set up beforehand,
        and must be compatible with the tabular input. The tabular
        input will be interpolated to the initialized Sg-table.
        IMPORTANT: Set sgcr and swl to sensible values.

        If you have krg and krog in different dataframes, call this
        function twice.

        Calling function is responsible for checking if any data was
        actually added to the table.

        The dataframe input can be constructed using e.g. the swof2csv functionality.

        """
        from scipy.interpolate import PchipInterpolator

        if sgcolname not in df:
            raise ValueError(sgcolname + " not found in dataframe, " +
                             "can't read table data")
        swlfrominput = 1 - df[sgcolname].max()
        if abs(swlfrominput - self.swl) > epsilon:
            print(
                "Warning: swl and 1-max(sg) from the incoming table do not seem compatible"
            )
            print("         Do not trust the result near the endpoint.")
        if krgcolname in df:
            pchip = PchipInterpolator(df[sgcolname].astype(float),
                                      df[krgcolname].astype(float))
            # Do not extrapolate this data. We will bfill and ffill afterwards
            self.table["krg"] = pchip(self.table.sg, extrapolate=False)
            self.table["krg"].fillna(method="ffill", inplace=True)
            self.table["krg"].fillna(method="bfill", inplace=True)
            self.krgcomment = "-- krg from tabular input" + krgcomment + "\n"
        if krogcolname in df:
            pchip = PchipInterpolator(df[sgcolname].astype(float),
                                      df[krogcolname].astype(float))
            self.table["krog"] = pchip(self.table.sg, extrapolate=False)
            self.table["krog"].fillna(method="ffill", inplace=True)
            self.table["krog"].fillna(method="bfill", inplace=True)
            self.krogcomment = "-- krog from tabular input" + krogcomment + "\n"
        if pccolname in df:
            pchip = PchipInterpolator(df[sgcolname].astype(float),
                                      df[pccolname].astype(float))
            self.table["pc"] = pchip(self.table.sg, extrapolate=False)
            self.pccomment = "-- pc from tabular input" + pccomment + "\n"
Code Example #6
File: bd.py Project: zyjiang0571/pcc_geo_cnn_v2
def bdsnr(metric_set1, metric_set2, pchip=True):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric computes the average gain in PSNR between two
    rate-distortion curves [1].
    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    returns the calculated Bjontegaard metric 'dsnr'

    code adapted from code written by : (c) 2010 Giuseppe Valenzise
    http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
    """
    # pylint: disable=too-many-locals
    # numpy seems to do tricks with its exports.
    # pylint: disable=no-member
    # map() is recommended against.
    # pylint: disable=bad-builtin
    metric_set1 = preprocess(metric_set1, 0)
    metric_set2 = preprocess(metric_set2, 0)
    rate1 = [x[0] for x in metric_set1]
    psnr1 = [x[1] for x in metric_set1]
    rate2 = [x[0] for x in metric_set2]
    psnr2 = [x[1] for x in metric_set2]

    log_rate1 = list(map(math.log, rate1))
    log_rate2 = list(map(math.log, rate2))

    # Integration interval.
    min_int = max([min(log_rate1), min(log_rate2)])
    max_int = min([max(log_rate1), max(log_rate2)])

    if pchip:
        poly1 = PchipInterpolator(log_rate1, psnr1)
        poly2 = PchipInterpolator(log_rate2, psnr2)

        int1 = poly1.integrate(min_int, max_int)
        int2 = poly2.integrate(min_int, max_int)
    else:
        # Best cubic poly fit for graph represented by log_ratex, psnr_x.
        poly1 = np.polyfit(log_rate1, psnr1, 3)
        poly2 = np.polyfit(log_rate2, psnr2, 3)

        # Integrate poly1, and poly2.
        p_int1 = np.polyint(poly1)
        p_int2 = np.polyint(poly2)

        # Calculate the integrated value over the interval we care about.
        int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)
        int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)

    # Calculate the average improvement.
    if max_int != min_int:
        avg_diff = (int2 - int1) / (max_int - min_int)
    else:
        avg_diff = 0.0
    return avg_diff
Code Example #7
    def add_oilwater_fromtable(
        self,
        df,
        swcolname="Sw",
        krwcolname="krw",
        krowcolname="krow",
        pccolname="pcow",
        krwcomment="",
        krowcomment="",
        pccomment="",
    ):
        """Interpolate relpermdata from a dataframe.

        The saturation range with endpoints must be set up beforehand,
        and must be compatible with the tabular input. The tabular
        input will be interpolated to the initialized Sw-table.

        If you have krw and krow in different dataframes, call this
        function twice.

        Calling function is responsible for checking if any data was
        actually added to the table.

        The python package ecl2df has a tool for converting Eclipse input
        files to dataframes.

        Args:
            df: Pandas dataframe containing data
            swcolname: string, column name with the saturation data in the dataframe.
            krwcolname: string, name of the column with krw
            krowcolname: string
            pccolname: string
            krwcomment: string
            krowcomment: string
            pccomment: string
        """
        from scipy.interpolate import PchipInterpolator

        if swcolname not in df:
            raise ValueError(swcolname +
                             " not found in dataframe, can't read table data")
        if krwcolname in df:
            pchip = PchipInterpolator(df[swcolname].astype(float),
                                      df[krwcolname].astype(float))
            self.table["krw"] = pchip(self.table.sw)
            self.krwcomment = "-- krw from tabular input" + krwcomment + "\n"
        if krowcolname in df:
            pchip = PchipInterpolator(df[swcolname].astype(float),
                                      df[krowcolname].astype(float))
            self.table["krow"] = pchip(self.table.sw)
            self.krowcomment = "-- krow from tabular input" + krowcomment + "\n"
        if pccolname in df:
            pchip = PchipInterpolator(df[swcolname].astype(float),
                                      df[pccolname].astype(float))
            self.table["pc"] = pchip(self.table.sw)
            self.pccomment = "-- pc from tabular input" + pccomment + "\n"
Code Example #8
    def load(self):
        self.rawData = {}
        try:
            self.rawData['nuE'] = pd.read_csv(self.basePath +
                                              '/AtProduction_neutrinos_e.dat',
                                              sep=r"\s*",
                                              engine='python')
            self.rawData['nuMu'] = pd.read_csv(
                self.basePath + '/AtProduction_neutrinos_mu.dat',
                sep=r"\s*",
                engine='python')
            self.rawData['nuTau'] = pd.read_csv(
                self.basePath + '/AtProduction_neutrinos_tau.dat',
                sep=r"\s*",
                engine='python')
            self.loaded = True
        except OSError:
            raise FileNotFoundError('PPPC spectra not found at path ' +
                                    self.basePath + ' !')

        self.log10x = self.rawData['nuE'].loc[(
            self.rawData['nuE'].mDM == self.mass), ['Log[10,x]']].values.T[0]

        self.sourceSpectra = {}
        self.sourceSpectra['nuE'] = self.rawData['nuE'].loc[
            (self.rawData['nuE'].mDM == self.mass),
            [self.columns[self.process]]].values.T[0]
        self.sourceSpectra['nuMu'] = self.rawData['nuMu'].loc[
            (self.rawData['nuMu'].mDM == self.mass),
            [self.columns[self.process]]].values.T[0]
        self.sourceSpectra['nuTau'] = self.rawData['nuTau'].loc[
            (self.rawData['nuTau'].mDM == self.mass),
            [self.columns[self.process]]].values.T[0]

        oszillation = infOsz()
        oszillatedSpectra = oszillation.oscillate(self.sourceSpectra['nuE'],
                                                  self.sourceSpectra['nuE'],
                                                  self.sourceSpectra['nuMu'],
                                                  self.sourceSpectra['nuMu'],
                                                  self.sourceSpectra['nuTau'],
                                                  self.sourceSpectra['nuTau'])
        self.earthSpectra = {}
        self.earthSpectra['nuE'] = oszillatedSpectra[0] / 2.
        self.earthSpectra['nuMu'] = oszillatedSpectra[1] / 2.
        self.earthSpectra['nuTau'] = oszillatedSpectra[2] / 2.

        self.sourceSpectrum_interpol = {}
        self.earthSpectrum_interpol = {}

        for f in ['nuE', 'nuMu', 'nuTau']:
            self.earthSpectrum_interpol[f] = PchipInterpolator(
                self.log10x, self.earthSpectra[f], extrapolate=False)
            self.sourceSpectrum_interpol[f] = PchipInterpolator(
                self.log10x, self.sourceSpectra[f], extrapolate=False)
Code Example #9
File: generate-cross-sections.py Project: niess/ent
def interpolate_cross_section(model):
    '''Interpolate using PCHIP algorithm
    '''
    cs = model()

    p00 = PchipInterpolator(x=numpy.log(cs[:, 0]), y=numpy.log(cs[:, 1]))
    p01 = PchipInterpolator(x=numpy.log(cs[:, 0]), y=numpy.log(cs[:, 2]))
    p10 = PchipInterpolator(x=numpy.log(cs[:, 0]), y=numpy.log(cs[:, 3]))
    p11 = PchipInterpolator(x=numpy.log(cs[:, 0]), y=numpy.log(cs[:, 4]))

    return p00, p01, p10, p11
Code Example #10
        def post_extreme(data, case_type):

            if case_type == 3:
                t_s = min(max(data['Time'][0], 30.), data['Time'][-2])
                t_e = min(data['Time'][-1], 90.)
                idx_s = list(data['Time']).index(t_s)
                idx_e = list(data['Time']).index(t_e)
            else:
                idx_s = 0
                idx_e = -1

            Time = data['Time'][idx_s:idx_e]
            var_Fx = [
                "B1N1Fx", "B1N2Fx", "B1N3Fx", "B1N4Fx", "B1N5Fx", "B1N6Fx",
                "B1N7Fx", "B1N8Fx", "B1N9Fx"
            ]
            var_Fy = [
                "B1N1Fy", "B1N2Fy", "B1N3Fy", "B1N4Fy", "B1N5Fy", "B1N6Fy",
                "B1N7Fy", "B1N8Fy", "B1N9Fy"
            ]
            for i, (varFxi, varFyi) in enumerate(zip(var_Fx, var_Fy)):
                if i == 0:
                    Fx = np.array(data[varFxi][idx_s:idx_e])
                    Fy = np.array(data[varFyi][idx_s:idx_e])
                else:
                    Fx = np.column_stack(
                        (Fx, np.array(data[varFxi][idx_s:idx_e])))
                    Fy = np.column_stack(
                        (Fy, np.array(data[varFyi][idx_s:idx_e])))

            Fx_sum = np.zeros_like(Time)
            Fy_sum = np.zeros_like(Time)
            for i in range(len(Time)):
                Fx_sum[i] = np.trapz(Fx[i, :], R_out)
                Fy_sum[i] = np.trapz(Fy[i, :], R_out)
            idx_max_strain = np.argmax(np.sqrt(Fx_sum**2. + Fy_sum**2.))

            Fx = [data[Fxi][idx_max_strain] for Fxi in var_Fx]
            Fy = [data[Fyi][idx_max_strain] for Fyi in var_Fy]
            spline_Fx = PchipInterpolator(R_out, Fx)
            spline_Fy = PchipInterpolator(R_out, Fy)

            r = params['r'] - params['Rhub']
            Fx_out = spline_Fx(r)
            Fy_out = spline_Fy(r)
            Fz_out = np.zeros_like(Fx_out)

            unknowns['loads_Px'] = Fx_out
            unknowns['loads_Py'] = Fy_out * -1.
            unknowns['loads_Pz'] = Fz_out

            unknowns['loads_Omega'] = data['RotSpeed'][idx_max_strain]
            unknowns['loads_pitch'] = data['BldPitch1'][idx_max_strain]
            unknowns['loads_azimuth'] = data['Azimuth'][idx_max_strain]
Code Example #11
    def compute(self, inputs, outputs):
        # Fit spline to powercurve for higher grid density
        V_spline = np.linspace(inputs['v_min'], inputs['v_max'],
                               self.n_pc_spline)
        spline = PchipInterpolator(inputs['V'], inputs['P'])
        P_spline = spline(V_spline)
        spline = PchipInterpolator(inputs['V'], inputs['Omega'])
        Omega_spline = spline(V_spline)

        # outputs
        outputs['V_spline'] = V_spline.flatten()
        outputs['P_spline'] = P_spline.flatten()
        outputs['Omega_spline'] = Omega_spline.flatten()
Code Example #12
    def compute(self, inputs, outputs):
        # Fit spline to powercurve for higher grid density
        V_spline = np.linspace(inputs["v_min"], inputs["v_max"],
                               self.n_pc_spline)
        spline = PchipInterpolator(inputs["V"], inputs["P"])
        P_spline = spline(V_spline)
        spline = PchipInterpolator(inputs["V"], inputs["Omega"])
        Omega_spline = spline(V_spline)

        # outputs
        outputs["V_spline"] = V_spline.flatten()
        outputs["P_spline"] = P_spline.flatten()
        outputs["Omega_spline"] = Omega_spline.flatten()
Code Example #13
    def __init__(self, cachedir=None, whichPlanetPhaseFunction='lambert', **specs):
        
        #start the outspec
        self._outspec = {}

        # cache directory
        self.cachedir = get_cache_dir(cachedir)
        self._outspec['cachedir'] = self.cachedir
        specs['cachedir'] = self.cachedir

        # load the vprint function (same line in all prototype module constructors)
        self.vprint = vprint(specs.get('verbose', True))
        
        #Select which Phase Function to use
        assert isinstance(whichPlanetPhaseFunction, str), "whichPlanetPhaseFunction is not a string"
        self.whichPlanetPhaseFunction = whichPlanetPhaseFunction
        if whichPlanetPhaseFunction == 'quasiLambertPhaseFunction':
            from EXOSIMS.util.phaseFunctions import quasiLambertPhaseFunction
            self.calc_Phi = quasiLambertPhaseFunction
        elif whichPlanetPhaseFunction == 'hyperbolicTangentPhaseFunc':
            from EXOSIMS.util.phaseFunctions import hyperbolicTangentPhaseFunc
            self.calc_Phi = hyperbolicTangentPhaseFunc
        # else: 'lambert' is the default, so calc_Phi is left unchanged
        self._outspec['whichPlanetPhaseFunction'] = whichPlanetPhaseFunction

        #Define Phase Function Inverse
        betas = np.linspace(start=0.,stop=np.pi,num=1000,endpoint=True)*u.rad
        Phis = self.calc_Phi(betas)
        self.betaFunction = PchipInterpolator(-Phis,betas) #the -Phis ensure the function monotonically increases
Code Example #14
def cubic_interp(obs_t, cum_obs):
    """
    Construct a cubic count interpolant
    (which for monotonic counts is a quadratic rate)
    """

    # extend with null counts
    # so that it extrapolates, but conservatively
    obs_t = np.concatenate([
        [obs_t[0] - 2, obs_t[0] - 1],
        obs_t,
        [obs_t[-1] + 1, obs_t[-1] + 2]
    ])
    cum_obs = np.concatenate([
        [cum_obs[0], cum_obs[0]],
        cum_obs,
        [cum_obs[-1], cum_obs[-1]]
    ])

    big_n_hat = PPoly.from_bernstein_basis(
        PchipInterpolator(
            obs_t,
            cum_obs,
            extrapolate=True
        )
    )
    return big_n_hat
Code Example #15
def constrained_cubic_integrate_point(y_coarse, ntx, nti, end=130):
    """
    Like :py:func:`constrained_cubic_disaggregation_interval`,
    this integrates over an interpolated curve, but it assumes the
    input data is defined pointwise at the start of each interval,
    so it first fits a curve to the points, integrates the fit, and
    then takes differences across intervals.

    Args:
        y_coarse (np.ndarray): dependent axis, same size as :math:`x`.
            This can have more than one dimension, and the algorithm
            treats all but the last dimension as separate runs.
        ntx (DemographicInterval): independent axis
        nti (DemographicInterval): new independent axis
        end (float): Where to set the value to zero.

    Returns:
        np.ndarray: dependent values on finer axis
    """
    x = np.hstack([ntx.start, [end]])
    out_shape = list(y_coarse.shape[:-1]) + [len(nti)]
    y_out = np.zeros(out_shape, dtype=y_coarse.dtype)
    y = np.zeros((y_coarse.shape[-1] + 1, ))
    y[-1] = 0
    for draw in itertools.product(*[range(x) for x in y_coarse.shape[:-1]]):
        y[:-1] = y_coarse[draw]
        assert x.shape == y.shape
        fit = PchipInterpolator(x, y, extrapolate=True)
        # Last entry is for T_{x+5}
        integrate_once = 1
        integrated = fit.antiderivative(integrate_once)(nti.bound)
        y_out[draw] = np.diff(integrated, 1, axis=-1)
    return y_out
Code Example #16
File: univariate_sampler.py Project: d-toth1/Misc
    def __init__(self, x):
        self.original_sample = x
        self.sample_size = len(x)
        self.y_vals = np.linspace(0, 1, self.sample_size)
        self.inverse_cdf = PchipInterpolator(self.y_vals,
                                             np.sort(self.original_sample))
        self.rng = np.random.default_rng()
Code Example #17
def predictor(DATA, samples):
    tempDATA = np.zeros((2, samples))
    autocorrs = np.zeros((2, (samples * 2) - 1))

    if samples != DATA[0].size:
        for dim in range(2):
            pchip = PchipInterpolator(np.arange(1, DATA[1 - dim].size + 1),
                                      DATA[dim])
            tempDATA[dim] = pchip(np.linspace(1, DATA[dim].size, samples))
            autocorrs[dim] = autocorr(tempDATA[dim])
    else:
        tempDATA = DATA

    rowCount = autocorrs[1].size
    rows = []
    divisors = []
    predictLen = (samples * 3) - 2
    for row in range(rowCount):
        result = weightedAvgConvolve(tempDATA[1] + row - ((rowCount - 1) / 2),
                                     autocorrs[0] * autocorrs[1][row])
        rows.append(result[0])
        divisors.append(result[1])
    span = tempDATA[0][-1] - tempDATA[0][0]
    rowsSum = np.sum(
        rows, axis=0
    )  #np.divide(np.sum(rows, axis=0), np.array([((x - (predictLen / 2)) + 1) ** 0.2 for x in range(predictLen)]))
    x = np.linspace(tempDATA[0][0] - span, tempDATA[0][-1] + span,
                    predictLen) - span
    y = rowsSum / np.sum(divisors, axis=0)
    z = [0 for x in range(predictLen)]
    return np.array([[x[i], y[i], z[i]] for i in range(x.size)])
Code Example #18
    def solve_vol_from_forward_moneyness(self, m, Ts):
        # if isinstance(Ts, Iterable) and not isinstance(Ts, str):
        #     assert (max(Ts) <= self.listed_dates[-1]) and (min(Ts) >= self.listed_dates[0])  # Guarantee interpolation
        if isinstance(Ts, str):
            Ts = pd.to_datetime(Ts)
        if Ts > self.listed_dates[-1]:
            # Beyond the last listed date: reuse the last curve's parameters.
            last_lmn = self.curves[-1]
            T_y = (Ts - self.valuation_date).days / 365
            new_lmn = Lognormal_mixture_model_curve(
                T_y, last_lmn.spot, self.forward.get_forward(Ts), last_lmn.r,
                last_lmn.d, self.valuation_date, self.N)
            new_lmn.set_param(p=last_lmn.p,
                              xi=last_lmn.xi,
                              sigma=last_lmn.sigma)
            return new_lmn.solve_bsm_vol(new_lmn.forward * m)
        else:
            Ts = pd.Series(Ts)
            Vol_points = np.vectorize(
                lambda x: x.solve_bsm_vol(x.forward * m))(self.curves)
            Ts = np.vectorize(lambda x: (x - self.valuation_date).days / 365)(
                Ts)
            lds = np.vectorize(lambda x: (x - self.valuation_date).days / 365)(
                self.listed_dates)
            # Interpolate total variance (vol^2 * T) across the listed maturities.
            variance = np.square(Vol_points) * lds
            interp_var = PchipInterpolator(lds, variance)(Ts) / Ts
            return np.sqrt(interp_var)
Code Example #19
def evaluate_tail(tail, x, y, mean):
    """Evaluate a tail value proposal.

    Parameters
    ----------
    tail: float
        The proposed tail value.
    x: ndarray
        The bin boundaries.
    y: ndarray
        The bin values.
    mean: float
        The target mean value

    Returns
    -------
    loss: float
        The loss associated with the tail proposal
    """
    # Use current tail guess
    x[-1] = tail
    # Fit spline
    cdf = PchipInterpolator(x, y)
    # Estimate mean
    est_mean = estimate_mean(x[0], x[-1], cdf)
    # Calculate loss
    return (mean - est_mean) ** 2
Code Example #20
def interpolate_on_age(
        track,
        delta_t,
        keys=["M", "logL", "logTeff", "logR", "logroeff", "logM_loss"],
        kind='slinear'):
    ## @input: track   --> evolutionary track with given
    ## @input: delta_t --> time step to interpolate evolutionary track to
    ## @input: keys    --> fields of the track that we want to interpolate along
    ## @input: kind    --> type of interpolation to use. Options are: nearest; linear; zero;
    ##                      slinear; quadratic; cubic

    ## Sanity Check
    if kind not in [
            'nearest', 'linear', 'zero', 'slinear', 'quadratic', 'cubic'
    ]:
        raise ValueError(
            "kind must be either 'nearest', 'linear', 'zero', 'slinear', "
            "'quadratic' or 'cubic'")

    ## Determine initial mass of track
    Mini = track['M'][0]

    ## Build array of ages sampled equidistantly from the beginning to end of
    ## the track with time step delta_t
    age = track['age']
    i_ages = np.arange(min(age), max(age), delta_t)

    ## Build the interpolation function for each field of interest
    ## and use to populate new interpolated track
    i_track = {key: [] for key in keys}
    for key in keys:
        # Note: 'kind' only applies to the commented-out interp1d variant;
        # the active code always uses shape-preserving PCHIP interpolation.
        # ifunc = interp1d(age, track[key], kind=kind)(i_ages)
        ifunc = PchipInterpolator(age, track[key], extrapolate=False)(i_ages)
        i_track[key] = ifunc

    return i_track
Code Example #21
def node_analysis(G, A, weight, Ninter):

    # calculate shortest path lengths
    dist2A = dict(nx.shortest_path_length(G, source=A, weight=weight))

    # build array of left and right borders of ramp-like functions
    left = []
    right = []
    for u, v, w in G.edges(data=weight):
        du = dist2A[u]
        dv = dist2A[v]
        dmax = 0.5 * (du + dv + w)
        left.append(du)
        left.append(dv)
        right.append(dmax)
        right.append(dmax)
    times = np.linspace(0, 2 * np.amax(right), Ninter)
    left = np.array(left)
    right = np.array(right)
    t = np.expand_dims(times, axis=-1)
    ramps = overlap(t, left, right)
    volume = np.sum(ramps, axis=-1)
    spl = PchipInterpolator(times, volume)

    return spl, np.amax(right)
Code Example #22
def get_interpolators(df, currentVariable):

    # Group by username and extract timestamps and values for each user
    grouped_data = df.groupby('username')
    data_by_user = [user for _, user in grouped_data]
    ts = [user['timestamp'].astype(float).to_numpy() for user in data_by_user]
    vals = [
        user[currentVariable].astype(float).to_numpy() for user in data_by_user
    ]

    # Make sure all data starts and ends at the same time for each user, if the
    # data doesn't suggest otherwise start and end value are 50.
    max_t = max([max(t) for t in ts])
    total = np.sum([np.sum(v) for v in vals])
    avg = total / np.sum([len(v) for v in vals])
    for i in range(len(ts)):
        if min(ts[i]) != 0:
            ts[i] = np.append([0], ts[i])
            vals[i] = np.append([avg], vals[i])
        if max(ts[i]) != max_t:
            ts[i] = np.append(ts[i], [max_t])
            vals[i] = np.append(vals[i], [avg])
        # Round last timestamp up (for smoother display):
        ts[i] = np.append(ts[i][:-1], int(ts[i][-1]) + 1)

    # Create the interpolation
    interpolators = [PchipInterpolator(t, val) for (t, val) in zip(ts, vals)]
    return interpolators, max_t
Code Example #23
File: util.py Project: ctroein/octavvs
def load_reference(wn, what=None, matfilename=None):
    """
    Loads and normalizes a spectrum from a Matlab file, interpolating at the given points.
        The reference is assumed to cover the entire range of wavenumbers.
    Parameters:
        wn: array of wavenumbers at which to get the spectrum
        what: A string defining what type of reference to get, corresponding to a file in the
        'reference' directory
        matfilename: the name of an arbitrary Matlab file to load data from; the data must be
        in a matrix called AB, with wavenumbers in the first column.
        Returns: spectrum at the points given by wn
    """
    if (what is None) == (matfilename is None):
        raise ValueError("Either 'what' or 'matfilename' must be specified")
    if what is not None:
        matfilename = resource_filename('octavvs.reference_spectra',
                                        what + ".mat")

    ref = read_mat(matfilename)['AB']
    # Handle the case of high-to-low since the interpolator requires low-to-high
    d = 1 if ref[0, 0] < ref[-1, 0] else -1
    ref = PchipInterpolator(ref[::d, 0], ref[::d, 1])(wn)
    return ref  #/ ref.max()
Code Example #24
def correct_b0(img, freqs, b0, offsets, bin=10, thresh=1e5):
    '''Corrects for B0 effects and returns desired offset images.

    img = 4D array
    freqs = frequencies corresponding in order to the 4th dim of the img
    b0 = B0 offset map matching the spatial dimensions of img
    offsets = desired CEST offsets (in a list, allows multiple)
    bin = frequency step size used when resampling each z-spectrum
    thresh = Masking threshold

    Returns:
    plusmap - +offset map corrected for b0
    minmap - -offset map corrected for b0'''
    cim_f = np.reshape(img, (np.prod(img.shape[:-1]), img.shape[-1]))
    b0map = np.reshape(b0, (np.prod(img.shape[:-1])))
    plusmap = np.zeros((len(cim_f), len(offsets)))
    minmap = np.zeros((len(cim_f), len(offsets)))

    for vx in range(len(cim_f)):
        if cim_f[vx, -1] > thresh:
            ip = PchipInterpolator(freqs, cim_f[vx, :])
            xn = np.arange(freqs[0], freqs[-1], bin)
            yn = ip(xn)
            off = b0map[vx]
            for i, o in enumerate(offsets):
                plus = yn[find_nearest(xn, o + off)]
                minus = yn[find_nearest(xn, -o + off)]
                plusmap[vx, i] = plus
                minmap[vx, i] = minus

    nshape = list(img.shape[:-1])
    nshape.append(len(offsets))
    plusmap = np.reshape(plusmap, nshape)
    minmap = np.reshape(minmap, nshape)
    return plusmap, minmap
Code Example #25
def constrained_cubic_disaggregation_interval(y_coarse, ntx, nti):
    """
    Given observed values on a coarse domain, estimate their
    values on a finer domain. This uses constrained cubic splines.
    It constructs the cumulative sum of the y, fits the spline,
    interpolates to the finer x, and then takes differences across
    intervals.

    Args:
        y_coarse (np.ndarray): dependent axis, same size as :math:`x`.
            This can have more than one dimension, and the algorithm
            treats all but the last dimension as separate runs.
        ntx (DemographicInterval): independent axis
        nti (DemographicInterval): new independent axis

    Returns:
        np.ndarray: dependent values on finer axis
    """
    out_shape = list(y_coarse.shape[:-1]) + [len(nti)]
    LOGGER.debug("in {} out {}".format(y_coarse.shape, out_shape))
    y_out = np.zeros(out_shape, dtype=y_coarse.dtype)
    for draw in itertools.product(*[range(x) for x in y_coarse.shape[:-1]]):
        y = np.hstack([[0], np.cumsum(y_coarse[draw], axis=-1)])
        fit = PchipInterpolator(ntx.bound, y, extrapolate=True)(nti.bound)
        y_out[draw] = np.diff(fit, 1, axis=-1)
    return y_out
Code Example #26
def simp_dist(f, pca):
    reduced = pca.transform([f])
    rec = pca.pt_from_proj(reduced) + pca.bary
    rec_pdf = pca.get_pdf(rec)

    qgrid = np.cumsum(f.pdf_eval) * (f.pdf_grid[1] - f.pdf_grid[0])
    orig_qeval = f.pdf_grid

    # reconstruct the INV-CDF
    rec_cdf = np.cumsum(rec_pdf) * (f.pdf_grid[1] - f.pdf_grid[0])

    # adjust for possible flat regions
    keep = np.where(np.diff(rec_cdf) > 1e-10)
    rec_invcdf = PchipInterpolator(rec_cdf[keep],
                                   f.pdf_grid[keep],
                                   extrapolate=False)

    rec_invcdf_eval = rec_invcdf(qgrid)
    nans = np.where(np.isnan(rec_invcdf_eval))[0]
    if len(nans) > 0:
        rec_invcdf_eval[nans[nans < len(rec_invcdf_eval) / 2]] = 0.0
        rec_invcdf_eval[nans[nans > len(rec_invcdf_eval) / 2]] = 1.0

    er = np.sqrt(np.sum(
        (orig_qeval - rec_invcdf_eval)[1:]**2 * np.diff(qgrid)))
    return er
Code Example #27
    def _init_joint_interpolators(self, plan):
        # Create default joint interpolators (PCHIP, or linear for two waypoints).
        assert isinstance(plan, JointTrajectoryWaypointPlan)
        assert len(plan.waypoints) >= 2

        interps = []
        t = np.array([p.time_from_start for p in plan.waypoints])

        if len(plan.waypoints) == 2:
            # Use a linear interpolator for 2 points
            for j in range(len(self._robot.joint_type)):
                t1 = np.hstack((-10, t, t[-1] + 10))
                x = np.array([p.positions[j] for p in plan.waypoints])
                x1 = np.hstack((x[0], x, x[-1]))
                pchip = interp1d(t1, x1)
                interps.append(pchip)
        else:
            # Use PCHIP interpolators for three or more points
            for j in range(len(self._robot.joint_type)):
                t1 = np.hstack((-10, t, t[-1] + 10))
                x = np.array([p.positions[j] for p in plan.waypoints])
                x1 = np.hstack((x[0], x, x[-1]))
                pchip = PchipInterpolator(t1, x1)
                interps.append(pchip)

        return interps
Code Example #28
File: spline.py Project: tonyxwz/pyADAPT
    def variance_spline(self):
        """Interpolate the standard deviation to evaluate the objective
        (error) function; the interpolation is performed on the variance
        with a PCHIP spline.
        """
        pp = PchipInterpolator(self.time, self.variances)
        return pp
Code Example #29
File: gp_sfh.py Project: CalebLammers/dense_basis
def Pchip_interpolator(x, y, res=1000):

    interpolator = PchipInterpolator(x, y)
    x_pred = np.linspace(np.amin(x), np.amax(x), res)
    y_pred = interpolator(x_pred)

    return x_pred, y_pred
Code Example #30
    def goal_cb(self, gh):
        with self._lock:

            g = gh.get_goal()
            if g.trajectory.joint_names != [
                    'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5',
                    'joint_6'
            ]:
                gh.set_rejected(text="Invalid joint names")
                print "Invalid joint names"
                return

            start_joint_angles = np.array(g.trajectory.points[0].positions)
            if np.any(
                    np.abs(start_joint_angles -
                           self._current_joint_angles) > np.deg2rad(5)):
                gh.set_rejected()
                return
            else:
                gh.set_accepted()

            interps = []
            t = np.array(
                [p.time_from_start.to_sec() for p in g.trajectory.points])
            for j in range(6):
                x = np.array([p.positions[j] for p in g.trajectory.points])
                pchip = PchipInterpolator(t, x)
                interps.append(pchip)

            self._abort_trajectory()

            self._trajectory_gh = gh
            self._trajectory_max_t = t[-1]
            self._trajectory_interp = interps
            self._trajectory_valid = True