Example 1
    def yonas_3(self):
        a, b = 0, 3
        x_sim = np.linspace(truncnorm.ppf(0.01, a, b),
                            truncnorm.ppf(0.99, a, b), self.n_intervals)
        x = np.arange(self.n_intervals)  # unused in this snippet
        y = np.exp(truncnorm.pdf(x_sim, a, b) * 1.35)
        return y
Example 2
    def late(self):
        a, b = 2, 3
        y = np.linspace(truncnorm.ppf(0.01, a, b), truncnorm.ppf(0.99, a, b),
                        self.n_intervals)
        y = np.exp(truncnorm.pdf(y, a, b))
        y = y[::-1] / 13 + 1  # reverse and rescale the bell curve
        y -= y[0] - 1         # shift so the series starts at exactly 1
        return y
Example 3
    def late(self):
        a, b = 2, 100
        x_sim = np.linspace(truncnorm.ppf(0.01, a, b),
                            truncnorm.ppf(0.99, a, b), self.n_intervals)
        x = np.arange(50)  # unused in this snippet
        y = np.exp(truncnorm.pdf(x_sim, a, b))
        y = y[::-1] / 15 + 1  # reverse and rescale the bell curve
        y -= y[0] - 1         # shift so the series starts at exactly 1
        return y
Example 4
def gen_model(mean=0.0,
              std=1.0,
              num_sigma=6.0,
              order=1,
              numel=512,
              real_type=RealType.FixedPoint):
    # create mixed-signal model
    model = MixedSignalModel('model', build_dir=BUILD_DIR, real_type=real_type)
    model.add_digital_input('clk')
    model.add_digital_input('rst')
    model.add_analog_output('real_out')

    # compute the inverse CDF of the distribution (truncated to 0, 1 domain)
    inv_cdf = lambda x: truncnorm.ppf(
        x, -num_sigma, +num_sigma, loc=mean, scale=std)

    # create the function object
    inv_cdf_func = model.make_function(inv_cdf,
                                       domain=[0.0, 1.0],
                                       order=order,
                                       numel=numel)

    model.set_this_cycle(
        model.real_out,
        model.arbitrary_noise(inv_cdf_func, clk=model.clk, rst=model.rst))

    # write the model
    return model.compile_to_file(VerilogGenerator())
Example 5
    def test_pdf(self):
        pdf = PDF(norm)

        # even if it's really far out it's still a valid value
        assert_equal(pdf.valid(1003), 1003)
        # logp
        assert_equal(pdf.logp(0), norm.logpdf(0))

        # examine dist with finite support
        pdf = PDF(truncnorm(-1, 1))
        assert_equal(pdf.logp(-2), -np.inf)
        assert_equal(pdf.logp(-0.5), truncnorm.logpdf(-0.5, -1, 1))

        # draw random values and check they respect the finite support
        vals = pdf.rvs(size=1000)
        assert_(np.min(vals) >= -1)
        assert_(np.max(vals) <= 1)

        # test a uniform distribution
        pdf = PDF(uniform(1, 9))
        assert_equal(pdf.logp(2), np.log(1.0 / 9.0))
        assert_equal(pdf.logp(10.0), np.log(1.0 / 9.0))

        # test the invcdf
        rando = np.random.uniform(size=10)
        pdf = PDF(truncnorm(-1, 1))
        assert_equal(pdf.invcdf(rando), truncnorm.ppf(rando, -1, 1))
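The `invcdf` assertion relies on `ppf` and `cdf` being exact inverses on the truncated support. A minimal standalone round trip (our sketch, not part of the original test suite):

import numpy as np
from scipy.stats import truncnorm

q = np.linspace(0.01, 0.99, 9)
vals = truncnorm.ppf(q, -1, 1)   # quantiles land inside the [-1, 1] support
assert np.allclose(truncnorm.cdf(vals, -1, 1), q)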
Example 6
def LOS_clouds_priortransform(u,
                              rlims=(0., 6.),
                              dlims=(4., 19.),
                              pb_params=(-3., 0.7, -np.inf, 0.)):
    """
    The "prior transform" for the LOS fit that converts from draws on the
    N-dimensional unit cube to samples from the prior. Used in nested sampling
    methods. Assumes uniform priors for distance and reddening
    and a (truncated) log-normal in outlier fraction.

    Parameters
    ----------
    u : `~numpy.ndarray` of shape `(Nparams)`
        The `2 + 2 * Nclouds` values drawn from the unit cube.
        Contains the outlier fraction `P_b`, followed by the foreground
        reddening `fred`, followed by a series of `(dist, red)` pairs for
        each "cloud" along the LOS.

    rlims : 2-tuple, optional
        The reddening bounds within which we'd like to sample. Default is
        `(0., 6.)`, which also assumes reddening is in units of Av.

    dlims : 2-tuple, optional
        The distance bounds within which we'd like to sample. Default is
        `(4., 19.)`, which also assumes distance is in units of distance
        modulus.

    pb_params : 4-tuple, optional
        Mean, standard deviation, lower bound, and upper bound for a
        truncated log-normal distribution. The default is
        `(-3., 0.7, -np.inf, 0.)`, which corresponds to a mean of 0.05, a
        standard deviation of a factor of 2, a lower bound of 0, and an
        upper bound of 1.

    Returns
    -------
    x : `~numpy.ndarray` of shape `(Nparams)`
        The transformed parameters.

    """

    # Initialize values (copy so the unit-cube draws are not mutated in place).
    x = np.array(u)

    # pb (outlier fraction)
    pb_mean, pb_std, pb_low, pb_high = pb_params
    a = (pb_low - pb_mean) / pb_std  # set normalized lower bound
    b = (pb_high - pb_mean) / pb_std  # set normalized upper bound
    x[0] = np.exp(truncnorm.ppf(u[0], a, b, loc=pb_mean, scale=pb_std))

    # reddening
    x[1::2] = np.sort(u[1::2]) * (rlims[1] - rlims[0]) + rlims[0]

    # distances
    x[2::2] = np.sort(u[2::2]) * (dlims[1] - dlims[0]) + dlims[0]

    return x
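The outlier-fraction transform works because exponentiating a normal truncated to (-inf, 0] in log space can only produce values in (0, 1]. A quick check of that step in isolation (our sketch, using the default `pb_params`):

import numpy as np
from scipy.stats import truncnorm

pb_mean, pb_std, pb_low, pb_high = -3., 0.7, -np.inf, 0.
a = (pb_low - pb_mean) / pb_std   # -inf: no lower truncation in log space
b = (pb_high - pb_mean) / pb_std
u = np.linspace(1e-6, 1 - 1e-6, 101)
pb = np.exp(truncnorm.ppf(u, a, b, loc=pb_mean, scale=pb_std))
assert pb.min() > 0.0 and pb.max() <= 1.0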
Example 7
        def prior(cube, ndim, nparams):
            # NOTE: You may want to adjust this for your case!
            # truncated normal prior
            cube[0] = truncnorm.ppf(cube[0], a_A, b_A, loc=mu_A, scale=sigma_A)
            # log-uniform prior
            # if alpha = 1e2, it's between 1e1 and 1e3
            cube[1] = 10**(cube[1] * 2 + (alpha_exponent - 1))
            # log-uniform prior
            # if T = 1e2, it's between 1e1 and 1e3
            cube[2] = 10**(cube[2] * 2 + (T_exponent - 1))
            # truncated normal prior
            cube[3] = truncnorm.ppf(cube[3],
                                    a_Eshift,
                                    b_Eshift,
                                    loc=mu_Eshift,
                                    scale=sigma_Eshift)

            if np.isinf(cube[3]):
                self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])
Example 8
def createBall(frow, fline, fminRad, fmaxRad, fgsdFunction, fprobability, fx,
               fmean, fstd, finterval, fa, fb):
    # pick a grain size according to fgsdFunction (2: awGrainSize,
    # 1: truncated normal, 0: uniform); note that a dict literal evaluates
    # every branch before one is selected
    fGrainSize = {
        2: awGrainSize(fprobability, fx, fmean, fstd, finterval, fa, fb),
        1: truncnorm.ppf(random.random(), fa, fb, fmean, fstd),
        0: random.uniform(fminRad, fmaxRad)
    }[fgsdFunction]
    return random.uniform(0, fline), frow + fGrainSize, fGrainSize
Example 9
def truncnorm_ppf(x, a, b, loc=0., scale=1.):
    """
    Approximate percentile function of the truncated normal, particularly in
    the tail regions (where the standard SciPy function may be undefined).
    """
    thres = truncnorm.ppf(x, (a - loc) / scale, (b - loc) / scale,
                          loc=loc, scale=scale)

    if np.any(np.isnan(thres)) or np.any(np.isinf(thres)):
        logging.info("Threshold is NaN or inf; using approximations.")
        thres = loc + scale * quantile_tn(x, (a - loc) / scale,
                                          (b - loc) / scale)
    return thres
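`quantile_tn` is an external helper that is not shown here. For reference, a minimal sketch of one common fallback, assuming a deep-tail truncation (standardized lower bound a >> 1) where the conditional density is approximately exponential; this is our illustration, not the original helper:

import numpy as np

def quantile_tn(x, a, b):
    # hypothetical stand-in: exponential-tail approximation of the
    # standardized truncated-normal quantile, valid for a >> 1, using
    # CDF(z) ~ (1 - exp(-a*(z - a))) / (1 - exp(-a*(b - a))) on [a, b]
    x = np.asarray(x, dtype=float)
    mass = -np.expm1(-a * (b - a)) if np.isfinite(b) else 1.0
    return a - np.log1p(-x * mass) / a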
Example 10
    def print_O2_confint(self, t, T, O2conv, save_path=None):

        dX, sigmas = self.get_rate_and_var(T, O2conv)

        if self.constrained:
            # note: truncnorm's a and b are in standard-deviation units about
            # loc, so a=0 places the lower bound at dX itself, not at zero
            lb = truncnorm.ppf(0.025, 0, np.inf, dX, sigmas)
            ub = truncnorm.ppf(0.975, 0, np.inf, dX, sigmas)
        else:
            lb = dX - 2 * sigmas
            ub = dX + 2 * sigmas

        plt.figure()
        plt.plot(t, dX)
        plt.fill_between(t, lb, ub)
        plt.xlabel('Time (min)')
        plt.ylabel('Consumption (% mol)')
        plt.title('Conversion Rate with Uncertainty Bounds')

        if isinstance(save_path, str):
            plt.savefig(save_path)

        plt.show()
Example 11
    def ppf(self, u):
        '''
        Evaluates the percentile function (inverse c.d.f.) for a given array
        of quantiles.

        :param u: Percentiles for which the ppf will be computed.
        :type u: numpy.array
        :returns: A Data object containing the values of the ppf.
        :rtype: natter.DataModule.Data
        '''
        a, b = ((self.param['a'] - self.param['mu']) / self.param['sigma'],
                (self.param['b'] - self.param['mu']) / self.param['sigma'])
        return Data(truncnorm.ppf(u, a, b, loc=self.param['mu'],
                                  scale=self.param['sigma']),
                    'Percentiles from a %s' % (self.name,))
Example 12
    def priortrans_spec(self, upars):

        # calculate transformation from prior volume to parameter for all modeled parameters

        outdict = {}

        for namepar in [
                'Teff', 'log(g)', '[Fe/H]', '[a/Fe]', 'Vrad', 'Vrot',
                'Inst_R', 'CarbonScale'
        ]:
            if namepar in upars.keys():
                upars_i = upars[namepar]
                if namepar in self.priordict['uniform'].keys():
                    par_i = (
                        (max(self.priordict['uniform'][namepar]) -
                         min(self.priordict['uniform'][namepar])) * upars_i +
                        min(self.priordict['uniform'][namepar]))
                elif namepar in self.priordict['gaussian'].keys():
                    par_i = norm.ppf(
                        upars_i,
                        loc=self.priordict['gaussian'][namepar][0],
                        scale=self.priordict['gaussian'][namepar][1])
                elif namepar in self.priordict['tgaussian'].keys():
                    # standardize the truncation bounds: (bound - loc) / scale
                    a = (self.priordict['tgaussian'][namepar][0] -
                         self.priordict['tgaussian'][namepar][2]
                         ) / self.priordict['tgaussian'][namepar][3]
                    b = (self.priordict['tgaussian'][namepar][1] -
                         self.priordict['tgaussian'][namepar][2]
                         ) / self.priordict['tgaussian'][namepar][3]
                    par_i = truncnorm.ppf(
                        upars_i, a, b,
                        loc=self.priordict['tgaussian'][namepar][2],
                        scale=self.priordict['tgaussian'][namepar][3])
                    if par_i == np.inf:
                        par_i = self.priordict['tgaussian'][namepar][1]
                elif namepar in self.priordict['exp'].keys():
                    par_i = expon.ppf(
                        upars_i,
                        loc=self.priordict['exp'][namepar][0],
                        scale=self.priordict['exp'][namepar][1])
                elif namepar in self.priordict['texp'].keys():
                    b = (self.priordict['texp'][namepar][1] -
                         self.priordict['texp'][namepar][0]
                         ) / self.priordict['texp'][namepar][2]
                    par_i = truncexpon.ppf(
                        upars_i, b,
                        loc=self.priordict['texp'][namepar][0],
                        scale=self.priordict['texp'][namepar][2])
                    if par_i == np.inf:
                        par_i = self.priordict['texp'][namepar][1]
                else:
                    par_i = (self.defaultpars[namepar][1] -
                             self.defaultpars[namepar][0]
                             ) * upars_i + self.defaultpars[namepar][0]

                outdict[namepar] = par_i

        # if fitting a blaze function, do transformation for polycoef
        pcarr = [x_i for x_i in upars.keys() if 'pc' in x_i]
        if len(pcarr) > 0:
            for pc_i in pcarr:
                if pc_i == 'pc_0':
                    uspec_scale = upars['pc_0']
                    outdict['pc_0'] = (1.25 - 0.75) * uspec_scale + 0.75
                else:
                    pcind = int(pc_i.split('_')[-1])
                    pcmax = (self.polycoefarr[pcind][0] +
                             5.0 * self.polycoefarr[pcind][1])
                    pcmin = (self.polycoefarr[pcind][0] -
                             5.0 * self.polycoefarr[pcind][1])
                    outdict[pc_i] = (pcmax - pcmin) * upars[pc_i] + pcmin

        return outdict
Example 13

def dndx(W, t0, epsilon, mu, sig, d00, d10, d01, d11, l):
    """
    W[0]: environmental state measure `n'
    W[1]: fraction of high-effort harvesters `x'
    """
    # gradient of selection
    g = ((1 - W[0]) * (d10 * W[1] + d00 * (1 - W[1]))
         - W[0] * (d11 * W[1] + d01 * (1 - W[1])))
    # a, b (truncation bounds) are module-level globals in the original source
    f = truncnorm.ppf(W[0], a, b, mu, sig)
    h = norm.ppf(W[0], mu, sig)
    theta = 1 / mu - 1

    # dynamical equations: the index `l` selects one of the candidate
    # feedback models for the environmental state
    dx = W[1] * (1 - W[1]) * g
    dn = [epsilon * W[0] * (1 - W[0]) * (-1 + (1 + theta) * W[1]),
          epsilon * W[0] * (1 - W[0]) * (W[1] - mu),
          epsilon * W[0] * (1 - W[0]) * (W[1] - mu + sig / 2 - sig * W[0]),
          epsilon * W[0] * (1 - W[0]) * (W[1] - h),
          epsilon * W[0] * (1 - W[0]) * (W[1] - f),
          epsilon * (W[1] - f)]

    return [dn[l], dx]
Example 14

    def inverse_sample(self, hypercube):
        """ Draw sample uniformly from the distribution via inverse sampling. """

        p = super(CustomPrior, self).inverse_sample(hypercube)

        # distance
        p[0] = truncnorm.ppf(hypercube[0], -2.0, 7.0, loc=0.3, scale=0.1)

        # phase of primary hot region
        if p[10] > 0.5:
            p[10] -= 1.0

        # phase of secondary hot region
        if p[11] > 0.5:
            p[11] -= 1.0

        return p
Example 15
    def priortrans_mist(self, upars):
        outdict = {}

        for namepar in [
                'EEP', 'initial_Mass', 'initial_[Fe/H]', 'initial_[a/Fe]'
        ]:
            if namepar in upars.keys():
                upars_i = upars[namepar]
                if namepar in self.priordict['uniform'].keys():
                    par_i = (
                        (max(self.priordict['uniform'][namepar]) -
                         min(self.priordict['uniform'][namepar])) * upars_i +
                        min(self.priordict['uniform'][namepar]))
                elif namepar in self.priordict['gaussian'].keys():
                    par_i = norm.ppf(
                        upars_i,
                        loc=self.priordict['gaussian'][namepar][0],
                        scale=self.priordict['gaussian'][namepar][1])

                elif namepar in self.priordict['tgaussian'].keys():
                    a = (self.priordict['tgaussian'][namepar][0] -
                         self.priordict['tgaussian'][namepar][2]
                         ) / self.priordict['tgaussian'][namepar][3]
                    b = (self.priordict['tgaussian'][namepar][1] -
                         self.priordict['tgaussian'][namepar][2]
                         ) / self.priordict['tgaussian'][namepar][3]
                    par_i = truncnorm.ppf(
                        upars_i,
                        a,
                        b,
                        loc=self.priordict['tgaussian'][namepar][2],
                        scale=self.priordict['tgaussian'][namepar][3])
                    if par_i == np.inf:
                        par_i = self.priordict['tgaussian'][namepar][1]
                else:
                    par_i = (self.defaultpars[namepar][1] -
                             self.defaultpars[namepar][0]
                             ) * upars_i + self.defaultpars[namepar][0]

                outdict[namepar] = par_i

        return outdict
Example 16
    def inverse_sample(self, hypercube):
        """ Draw sample uniformly from the distribution via inverse sampling.

        :param hypercube: A pseudorandom point in an n-dimensional hypercube.

        :return: A parameter ``list``.

        """
        p = super(CustomPrior, self).inverse_sample(hypercube)

        # distance
        p[0] = truncnorm.ppf(hypercube[0], -2.0, 7.0, loc=0.3, scale=0.1)

        if p[10] > 0.5:
            p[10] -= 1.0

        if p[11] > 0.5:
            p[11] -= 1.0

        return p
Example 17
    def _ppf(self, q, a, b, mu, sigma):
        return truncnorm.ppf(q, a, b, loc=mu, scale=sigma)
Example 18

def FittedISMIP_project_icesheets(nsamps, pyear_start, pyear_end, pyear_step,
                                  cyear_start, cyear_end, baseyear,
                                  pipeline_id, rngseed):

    # Load the data file
    datafilename = "{}_data.pkl".format(pipeline_id)
    datafile = os.path.join(os.path.dirname(__file__), datafilename)

    with open(datafile, 'rb') as f:
        my_data = pickle.load(f)

    years = my_data["years"]
    temp_data = my_data["temp_data"]
    scenario = my_data["scenario"]

    # Load the fit file
    datafilename = "{}_fit.pkl".format(pipeline_id)
    datafile = os.path.join(os.path.dirname(__file__), datafilename)

    with open(datafile, 'rb') as f:
        my_data = pickle.load(f)

    groups_dict = my_data["groups_dict"]
    models_dict = my_data["models_dict"]
    betas_dict = my_data["betas_dict"]
    sigmas_dict = my_data["sigmas_dict"]
    trend_mean = my_data["trend_mean"]
    trend_sd = my_data["trend_sd"]

    # Extract the ice sources from the fitted dictionaries
    icesources = betas_dict.keys()

    # Define the target projection years
    targyears = np.arange(pyear_start, pyear_end + 1, pyear_step)

    # Find the data years that overlap with the target projection years
    (_, datayr_idx, targyear_idx) = np.intersect1d(years,
                                                   targyears,
                                                   return_indices=True)

    # Zero out the temperature data to the base year (Fitted models have 0-forced intercept)
    baseyear_idx = np.flatnonzero(years == baseyear)
    if baseyear_idx.size == 0:
        raise Exception(
            "baseyear is not found in temperature data. baseyear = {}".format(
                baseyear))
    temp_data = temp_data - temp_data[:, baseyear_idx]

    # Set the seed for the RNG
    np.random.seed(rngseed)

    # Initialize the samples dictionary to pass to the post-processing stage
    samps_dict = {}

    # Generate the indices for the temperature samples
    temp_sample_idx = np.random.choice(np.arange(temp_data.shape[0]), nsamps)

    # Generate a list of quantiles for the trend samples
    trend_q = np.random.random_sample(nsamps)

    # Loop over the ice sources
    #for icesource in icesources:
    for icesource in ["GIS"]:

        # Calculate the trend contributions over time for this ice sheet component.
        # Note: truncnorm's a/b are in standard-deviation units about loc, so
        # a=0.0 places the lower bound at trend_mean itself (b=99999.9 acts as
        # an effectively unbounded upper limit).
        ice_trend = truncnorm.ppf(trend_q,
                                  a=0.0,
                                  b=99999.9,
                                  loc=trend_mean[icesource],
                                  scale=trend_sd[icesource])[:, np.newaxis] * (
                                      targyears - baseyear)[np.newaxis, :]

        # Which model parameters do we need
        betas = betas_dict[icesource]
        sigmas = sigmas_dict[icesource]

        # Generate the indices for the model samples
        model_sample_idx = np.random.choice(np.arange(betas.shape[0]), nsamps)

        # Loop over the number of samples we need
        samps = []
        temp_samps = []
        time_samps = []
        const_samps = []

        for tidx, midx in zip(temp_sample_idx, model_sample_idx):

            # Generate a sample
            (this_sample, samp_temp, samp_time, samp_const) = my_model(temp_data[tidx,datayr_idx], \
                  betas[midx,:], sigmas[midx], \
                  targyears - baseyear, pyear_step)
            samps.append(this_sample)
            #temp_samps.append(samp_temp)
            #time_samps.append(samp_time)
            #const_samps.append(samp_const)

        # Convert the sample array into a numpy array
        samps = np.array(samps)
        #temp_samps = np.array(temp_samps)
        #time_samps = np.array(time_samps)
        #const_samps = np.array(const_samps)

        # Add the trend to the samples
        samps = samps + ice_trend

        # If the user wants to extrapolate projections based on rates, do so here
        if cyear_start or cyear_end:
            for i in np.arange(nsamps):
                samps[i, :] = ExtrapolateRate(samps[i, :], targyears,
                                              cyear_start, cyear_end)

        # Add the total samples to the samples dictionary
        samps_dict[icesource] = samps

        # Write the global projections to output netCDF files
        WriteNetCDF(samps, icesource, targyears[targyear_idx], scenario,
                    pipeline_id, baseyear)
        #WriteNetCDF(temp_samps, "{}TEMP".format(icesource), targyears[targyear_idx], scenario, pipeline_id)
        #WriteNetCDF(time_samps, "{}TIME".format(icesource), targyears[targyear_idx], scenario, pipeline_id)
        #WriteNetCDF(const_samps, "{}CONST".format(icesource), targyears[targyear_idx], scenario, pipeline_id)

    # Write the combined AIS projections to netcdf
    #ais_samps = samps_dict["WAIS"] + samps_dict["EAIS"] + samps_dict["PEN"]
    #WriteNetCDF(ais_samps, "AIS", targyears[targyear_idx], scenario, pipeline_id, baseyear)

    # Store the variables in a pickle
    output = {
        'samps_dict': samps_dict,
        'scenario': scenario,
        'targyears': targyears[targyear_idx],
        'baseyear': baseyear
    }
    outfilename = "{}_projections.pkl".format(pipeline_id)
    outfile = open(os.path.join(os.path.dirname(__file__), outfilename), 'wb')
    pickle.dump(output, outfile)
    outfile.close()

    return (None)
Example 19
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return super(TruncatedNormal, self).log_prob(
            self._to_std_rv(value)) - self._log_scale


if __name__ == '__main__':
    from scipy.stats import truncnorm
    loc, scale, a, b = 1., 2., 1., 2.
    tn_pt = TruncatedNormal(loc, scale, a, b)
    mean_pt, var_pt = tn_pt.mean.item(), tn_pt.variance.item()
    alpha, beta = (a - loc) / scale, (b - loc) / scale
    mean_sp, var_sp = truncnorm.stats(alpha,
                                      beta,
                                      loc=loc,
                                      scale=scale,
                                      moments='mv')
    print('mean', mean_pt, mean_sp)
    print('var', var_pt, var_sp)
    print('cdf',
          tn_pt.cdf(1.4).item(),
          truncnorm.cdf(1.4, alpha, beta, loc=loc, scale=scale))
    print('icdf',
          tn_pt.icdf(0.333).item(),
          truncnorm.ppf(0.333, alpha, beta, loc=loc, scale=scale))
    print('logpdf',
          tn_pt.log_prob(1.5).item(),
          truncnorm.logpdf(1.5, alpha, beta, loc=loc, scale=scale))
    print('entropy', tn_pt.entropy.item(),
          truncnorm.entropy(alpha, beta, loc=loc, scale=scale))
Example 20
def transform_truncated_normal(x, hyperparameters):
    mu, sigma, a, b = hyperparameters
    ar, br = (a - mu) / sigma, (b - mu) / sigma
    return truncnorm.ppf(x, ar, br, loc=mu, scale=sigma)
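Illustrative usage (the values here are ours): uniform draws on [0, 1) map to samples confined to the requested [a, b] interval.

import numpy as np
from scipy.stats import truncnorm

rng = np.random.default_rng(0)
u = rng.random(10_000)
samples = transform_truncated_normal(u, (5.0, 2.0, 4.0, 8.0))
assert samples.min() >= 4.0 and samples.max() <= 8.0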
Example 21
from msdsl import *
from scipy.stats import truncnorm
m = MixedSignalModel('model')
y = m.add_analog_output('y')
inv_cdf = lambda x: truncnorm.ppf(x, -8, +8)
inv_cdf_func = m.make_function(inv_cdf, domain=[0.0, 1.0])
m.set_this_cycle(y, m.arbitrary_noise(inv_cdf_func))
m.compile_and_print(VerilogGenerator())
Example 22
    def normal_truncated_ppf(self, xvalue):
        return truncnorm.ppf(
            xvalue, (self.lower_limit - self.prior_estimate) / self.spread,
            (self.upper_limit - self.prior_estimate) / self.spread,
            loc=self.prior_estimate,
            scale=self.spread)
Example 23

# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 14:22:02 2020

@author: Paul Vincent Nonat
"""

import numpy as np
from scipy.stats import truncnorm
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 1)

a, b = 0, np.inf
mean, var, skew, kurt = truncnorm.stats(a, b, moments='mvsk')

x = np.linspace(truncnorm.ppf(0, a, b), truncnorm.ppf(0.99, a, b), 100)

ax.plot(x, truncnorm.pdf(x, a, b), 'r-', lw=5, alpha=1, label='truncnorm pdf')
rv = truncnorm(a, b)

ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# round-trip check: ppf inverts cdf at these quantiles
vals = truncnorm.ppf([0.001, 0.5, 0.999], a, b)

np.allclose([0.001, 0.5, 0.999], truncnorm.cdf(vals, a, b))

r = truncnorm.rvs(a, b, size=1000)

ax.hist(r, density=True, histtype='stepfilled', alpha=1)
ax.legend(loc='best', frameon=False)
Example 24
def get_truncated_lognormal_example_exact_quantities(lb, ub, mu, sigma):
    f = lambda x: np.exp(x).T

    #lb,ub passed to truncnorm_rv are defined for the standard normal;
    #adjust for mu and sigma by standardizing:
    alpha, beta = (lb - mu) / sigma, (ub - mu) / sigma

    denom = normal_rv.cdf(beta) - normal_rv.cdf(alpha)
    #truncated_normal_cdf = lambda x: (
    #    normal_rv.cdf((x-mu)/sigma)-normal_rv.cdf(alpha))/denom
    truncated_normal_cdf = lambda x: truncnorm_rv.cdf(
        x, alpha, beta, loc=mu, scale=sigma)
    truncated_normal_pdf = lambda x: truncnorm_rv.pdf(
        x, alpha, beta, loc=mu, scale=sigma)
    truncated_normal_ppf = lambda p: truncnorm_rv.ppf(
        p, alpha, beta, loc=mu, scale=sigma)

    # CDF of the output variable Y = exp(X) (a truncated lognormal)
    def f_cdf(y):
        vals = np.zeros_like(y)
        II = np.where((y > np.exp(lb)) & (y < np.exp(ub)))[0]
        vals[II] = truncated_normal_cdf(np.log(y[II]))
        JJ = np.where((y >= np.exp(ub)))[0]
        vals[JJ] = 1.
        return vals

    # PDF of the output variable Y = exp(X) (a truncated lognormal)
    def f_pdf(y):
        vals = np.zeros_like(y)
        II = np.where((y > np.exp(lb)) & (y < np.exp(ub)))[0]
        vals[II] = truncated_normal_pdf(np.log(y[II])) / y[II]
        return vals

    # Analytic VaR of model output
    VaR = lambda p: np.exp(truncated_normal_ppf(p))

    const = np.exp(mu + sigma**2 / 2)

    # Analytic CVaR of model output
    CVaR = lambda p: -0.5 / denom * const / (1 - p) * (erf(
        (mu + sigma**2 - ub) / (np.sqrt(2) * sigma)) - erf(
            (mu + sigma**2 - np.log(VaR(p))) / (np.sqrt(2) * sigma)))

    def cond_exp_le_eta(y):
        vals = np.zeros_like(y)
        II = np.where((y > np.exp(lb)) & (y < np.exp(ub)))[0]
        vals[II] = -0.5 / denom * const * (erf(
            (mu + sigma**2 - np.log(y[II])) / (np.sqrt(2) * sigma)) - erf(
                (mu + sigma**2 - lb) / (np.sqrt(2) * sigma))) / f_cdf(y[II])
        JJ = np.where((y >= np.exp(ub)))[0]
        vals[JJ] = mean
        return vals

    ssd = lambda y: f_cdf(y) * (y - cond_exp_le_eta(y))

    mean = CVaR(np.zeros(1))

    def cond_exp_y_ge_eta(y):
        vals = np.ones_like(y) * mean
        II = np.where((y > np.exp(lb)) & (y < np.exp(ub)))[0]
        vals[II] = -0.5 / denom * const * (erf(
            (mu + sigma**2 - ub) / (np.sqrt(2) * sigma)) - erf(
                (mu + sigma**2 - np.log(y[II])) /
                (np.sqrt(2) * sigma))) / (1 - f_cdf(y[II]))
        JJ = np.where((y > np.exp(ub)))[0]
        vals[JJ] = 0
        return vals

    ssd_disutil = lambda eta: (1 - f_cdf(-eta)) * (eta + cond_exp_y_ge_eta(-eta))

    return f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil
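The closed-form VaR/CVaR above can be spot-checked by Monte Carlo (our sketch; the parameter values are arbitrary):

import numpy as np
from scipy.stats import truncnorm

lb, ub, mu, sigma = -2.0, 2.0, 0.0, 1.0
alpha, beta = (lb - mu) / sigma, (ub - mu) / sigma
y = np.exp(truncnorm.rvs(alpha, beta, loc=mu, scale=sigma,
                         size=200_000, random_state=1))
p = 0.9
var_mc = np.quantile(y, p)        # compare against VaR(p)
cvar_mc = y[y >= var_mc].mean()   # compare against CVaR(p)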
Example 25
    def sample(self, *args):
        x = np.asarray(args)
        # note: xmin/xmax are passed positionally as truncnorm's a/b, which
        # scipy interprets in standard-deviation units about mu
        self.last_sampled = truncnorm.ppf(x, self.xmin, self.xmax, self.mu,
                                          self.sd)
        return self.last_sampled
Example 26
    def priortrans_phot(self, upars):

        outdict = {}

        # if only fitting the SED, pull Teff/logg/FeH and do prior transformation
        if not self.spec_bool:
            for namepar in ['Teff', 'log(g)', '[Fe/H]', '[a/Fe]']:
                if namepar in upars.keys():
                    upars_i = upars[namepar]
                    if namepar in self.priordict['uniform'].keys():
                        par_i = ((max(self.priordict['uniform'][namepar]) -
                                  min(self.priordict['uniform'][namepar])) *
                                 upars_i +
                                 min(self.priordict['uniform'][namepar]))
                    elif namepar in self.priordict['gaussian'].keys():
                        par_i = norm.ppf(
                            upars_i,
                            loc=self.priordict['gaussian'][namepar][0],
                            scale=self.priordict['gaussian'][namepar][1])

                    elif namepar in self.priordict['tgaussian'].keys():
                        loc = self.priordict['tgaussian'][namepar][2]
                        scale = self.priordict['tgaussian'][namepar][3]
                        a = (self.priordict['tgaussian'][namepar][0] -
                             loc) / scale
                        b = (self.priordict['tgaussian'][namepar][1] -
                             loc) / scale
                        par_i = truncnorm.ppf(upars_i,
                                              a,
                                              b,
                                              loc=loc,
                                              scale=scale)
                        if par_i == np.inf:
                            par_i = self.priordict['tgaussian'][namepar][1]
                    elif namepar in self.priordict['beta'].keys():
                        a = self.priordict['beta'][namepar][0]
                        b = self.priordict['beta'][namepar][1]
                        loc = self.priordict['beta'][namepar][2]
                        scale = self.priordict['beta'][namepar][3]
                        par_i = beta.ppf(upars_i, a, b, loc=loc, scale=scale)

                    elif namepar in self.priordict['exp'].keys():
                        par_i = expon.ppf(
                            upars_i,
                            loc=self.priordict['exp'][namepar][0],
                            scale=self.priordict['exp'][namepar][1])
                    else:
                        par_i = (self.defaultpars[namepar][1] -
                                 self.defaultpars[namepar][0]
                                 ) * upars_i + self.defaultpars[namepar][0]

                    outdict[namepar] = par_i

        isopars = ['log(A)', 'log(R)', 'Av', 'Rv', 'Dist']
        if self.gal_bool:
            if 'Dist' in upars.keys():
                outdict['Dist'] = 1000.0 * self.AP.gal_ppf(upars['Dist'])
                isopars.remove('Dist')

        for namepar in isopars:
            if namepar in upars.keys():
                upars_i = upars[namepar]

                if namepar in self.priordict['uniform'].keys():
                    par_i = (
                        (max(self.priordict['uniform'][namepar]) -
                         min(self.priordict['uniform'][namepar])) * upars_i +
                        min(self.priordict['uniform'][namepar]))
                elif namepar in self.priordict['gaussian'].keys():
                    par_i = norm.ppf(
                        upars_i,
                        loc=self.priordict['gaussian'][namepar][0],
                        scale=self.priordict['gaussian'][namepar][1])

                elif namepar in self.priordict['exp'].keys():
                    par_i = expon.ppf(upars_i,
                                      loc=self.priordict['exp'][namepar][0],
                                      scale=self.priordict['exp'][namepar][1])

                elif namepar in self.priordict['tgaussian'].keys():
                    loc = self.priordict['tgaussian'][namepar][2]
                    scale = self.priordict['tgaussian'][namepar][3]
                    a = (self.priordict['tgaussian'][namepar][0] - loc) / scale
                    b = (self.priordict['tgaussian'][namepar][1] - loc) / scale
                    par_i = truncnorm.ppf(upars_i, a, b, loc=loc, scale=scale)
                    if par_i == np.inf:
                        par_i = self.priordict['tgaussian'][namepar][1]

                elif namepar in self.priordict['texp'].keys():
                    loc = self.priordict['texp'][namepar][2]
                    scale = self.priordict['texp'][namepar][3]
                    # truncexpon takes a single shape parameter (the upper
                    # bound in scale units), so only b is passed to ppf
                    b = (self.priordict['texp'][namepar][1] - loc) / scale
                    par_i = truncexpon.ppf(upars_i, b, loc=loc, scale=scale)
                    if par_i == np.inf:
                        par_i = self.priordict['texp'][namepar][1]

                elif namepar in self.priordict['beta'].keys():
                    a = self.priordict['beta'][namepar][0]
                    b = self.priordict['beta'][namepar][1]
                    loc = self.priordict['beta'][namepar][2]
                    scale = self.priordict['beta'][namepar][3]
                    par_i = beta.ppf(upars_i, a, b, loc=loc, scale=scale)

                elif namepar in self.priordict['loguniform'].keys():
                    par_i = reciprocal.ppf(
                        upars_i, self.priordict['loguniform'][namepar][0],
                        self.priordict['loguniform'][namepar][1])
                else:
                    par_i = (self.defaultpars[namepar][1] -
                             self.defaultpars[namepar][0]
                             ) * upars_i + self.defaultpars[namepar][0]

                outdict[namepar] = par_i

        return outdict
Example 27
def inv_cdf(x):
    return truncnorm.ppf(x, -8, 8)
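Because the support is truncated at ±8 standard deviations, the inverse CDF stays finite even at the endpoints, which is what makes it safe to tabulate over the whole [0, 1] domain (a quick check, ours):

from scipy.stats import truncnorm

assert truncnorm.ppf(0.0, -8, 8) == -8.0   # lower support bound, not -inf
assert truncnorm.ppf(1.0, -8, 8) == 8.0    # upper support bound, not +inf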
Example 28
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

# using msdsl
from scipy.stats import truncnorm
from msdsl import Function
inv_cdf = lambda x: truncnorm.ppf(x, -6, +6)
func = Function(inv_cdf, domain=[0.0, 0.5], order=1, numel=512, log_bits=5)
# print(func.get_samp_points_spline())
# for elem in func.get_samp_points_spline():
#     print(elem)
#     print(func.calc_addr(elem))

# compare results
test_pts = np.linspace(0, 0.5, 1000)

plt.plot(test_pts, inv_cdf(test_pts))
plt.plot(test_pts, func.eval_on(test_pts))
plt.show()
Example 29
def transform_truncated_normal(x, mu, sigma, a=0., b=1.):
    ar, br = (a - mu) / sigma, (b - mu) / sigma
    return truncnorm.ppf(x, ar, br, loc=mu, scale=sigma)
Example 30
    def priortrans_spec(self, upars):

        # calculate transformation from prior volume to parameter for all modeled parameters

        outdict = {}

        for namepar in [
                'Teff', 'log(g)', '[Fe/H]', '[a/Fe]', 'Vrad', 'Vrot', 'Vmic',
                'Inst_R'
        ]:
            if namepar in upars.keys():
                upars_i = upars[namepar]
                if namepar in self.priordict['uniform'].keys():
                    par_i = (
                        (max(self.priordict['uniform'][namepar]) -
                         min(self.priordict['uniform'][namepar])) * upars_i +
                        min(self.priordict['uniform'][namepar]))
                elif namepar in self.priordict['gaussian'].keys():
                    par_i = norm.ppf(
                        upars_i,
                        loc=self.priordict['gaussian'][namepar][0],
                        scale=self.priordict['gaussian'][namepar][1])

                elif namepar in self.priordict['tgaussian'].keys():
                    loc = self.priordict['tgaussian'][namepar][2]
                    scale = self.priordict['tgaussian'][namepar][3]
                    a = (self.priordict['tgaussian'][namepar][0] - loc) / scale
                    b = (self.priordict['tgaussian'][namepar][1] - loc) / scale
                    par_i = truncnorm.ppf(upars_i, a, b, loc=loc, scale=scale)
                    if par_i == np.inf:
                        par_i = self.priordict['tgaussian'][namepar][1]
                elif namepar in self.priordict['beta'].keys():
                    a = self.priordict['beta'][namepar][0]
                    b = self.priordict['beta'][namepar][1]
                    loc = self.priordict['beta'][namepar][2]
                    scale = self.priordict['beta'][namepar][3]
                    par_i = beta.ppf(upars_i, a, b, loc=loc, scale=scale)
                elif namepar in self.priordict['exp'].keys():
                    par_i = expon.ppf(upars_i,
                                      loc=self.priordict['exp'][namepar][0],
                                      scale=self.priordict['exp'][namepar][1])
                else:
                    par_i = (self.defaultpars[namepar][1] -
                             self.defaultpars[namepar][0]
                             ) * upars_i + self.defaultpars[namepar][0]

                outdict[namepar] = par_i

        # if fitting a blaze function, do transformation for polycoef
        pcarr = [x_i for x_i in upars.keys() if 'pc' in x_i]
        if len(pcarr) > 0:
            for pc_i in pcarr:
                if pc_i == 'pc_0':
                    uspec_scale = upars['pc_0']
                    outdict['pc_0'] = (2.0 - 0.5) * uspec_scale + 0.5
                else:
                    pcind = int(pc_i.split('_')[-1])

                    # pcmax = self.polycoefarr[pcind][0]+3.0*self.polycoefarr[pcind][1]
                    # pcmin = self.polycoefarr[pcind][0]-3.0*self.polycoefarr[pcind][1]
                    # outdict[pc_i] = (pcmax-pcmin)*upars[pc_i] + pcmin

                    # outdict[pc_i] = norm.ppf(upars[pc_i],loc=self.polycoefarr[pcind][0],scale=self.polycoefarr[pcind][1])

                    loc = self.polycoefarr[pcind][0]
                    scale = self.polycoefarr[pcind][1]
                    minval = loc - 5.0 * scale
                    maxval = loc + 5.0 * scale
                    a = (minval - loc) / scale
                    b = (maxval - loc) / scale
                    outdict[pc_i] = truncnorm.ppf(upars[pc_i],
                                                  a,
                                                  b,
                                                  loc=loc,
                                                  scale=scale)

        return outdict