Example #1
def f_p_I(s=0.465, loc=0, scale=5.5, fst_q=0.0001, lst_q=0.9999):

    st = lognorm.ppf(fst_q, s, scale=scale)
    nd = lognorm.ppf(lst_q, s, scale=scale)

    x_cont = np.linspace(st, nd, 100)
    lognm_pdf = lognorm.pdf(x_cont, s, loc, scale)

    # convert to a list indexed by whole days, where each entry is the
    # average probability density for that day:
    # prob_days[i] = sum( lognm_pdf[j] | floor(x_cont[j]) == i ) / cont
    prob_days = []
    i = 0
    sm = 0
    cont = 0

    for j in range(len(x_cont)):

        # x_cont is monotonically increasing
        if i <= x_cont[j] < i + 1:
            sm += lognm_pdf[j]
            cont += 1
        else:
            prob_days.append(sm / cont)
            i += 1
            cont = 1
            sm = lognm_pdf[j]

    # the final bin's probability is appended after the loop finishes
    prob_days.append(sm / cont)

    return prob_days
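A minimal usage sketch for the excerpt above, assuming the imports it relies on (they are not shown in the snippet):

import numpy as np
from scipy.stats import lognorm

probs = f_p_I()                 # averaged PDF value per whole-day bin
print(len(probs), sum(probs))   # bin count; note these are averaged densities,
                                # not a normalized discrete distribution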
Example #2
def calculate_linspace(distribution):
    if distribution[0] == 'EXP':
        lambda_ = distribution[1]
        scale_ = 1 / lambda_
        return np.linspace(expon.ppf(0.001, scale=scale_),
                           expon.ppf(0.999, scale=scale_), 1000)

    if distribution[0] == 'WEIBULL':
        scale = distribution[1]
        shape = distribution[2]
        return np.linspace(weibull_min.ppf(0.001, shape, loc=0, scale=scale),
                           weibull_min.ppf(0.999, shape, loc=0, scale=scale),
                           1000)

    if distribution[0] == 'NORMAL':
        mu = distribution[1]
        sigma = distribution[2]
        return np.linspace(norm.ppf(0.001, loc=mu, scale=sigma),
                           norm.ppf(0.999, loc=mu, scale=sigma), 1000)

    if distribution[0] == 'LOGNORM':
        mu = distribution[1]
        sigma = distribution[2]
        scale = math.exp(mu)
        return np.linspace(lognorm.ppf(0.001, sigma, loc=0, scale=scale),
                           lognorm.ppf(0.999, sigma, loc=0, scale=scale), 1000)
    else:
        return np.linspace(0, 100, 1000)
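A usage sketch, assuming the imports the excerpt depends on:

import math
import numpy as np
from scipy.stats import expon, weibull_min, norm, lognorm

grid = calculate_linspace(('LOGNORM', 0.0, 0.5))   # mu=0, sigma=0.5
print(grid[0], grid[-1])                           # spans the 0.1% to 99.9% quantiles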
Example #3
def save_csd(fname, diam, shape, scale, show_plot=False):
    """Save cell size distribution plot.

    Creates files ``*.Packing_histogram.png`` and ``*.Packing_histogram.pdf``
    with the cell size distribution histogram and the continuous probability
    density function.

    Args:
        fname (str): base filename
        diam (ndarray): array of sphere diameters
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        show_plot (bool, optional): create window with plot
    """
    if shape == 0:
        xpos = np.linspace(scale / 2, scale * 2, 100)
    else:
        xpos = np.linspace(lognorm.ppf(0.01, shape, scale=scale),
                           lognorm.ppf(0.99, shape, scale=scale), 100)
    plt.figure(figsize=(12, 8))
    plt.rcParams.update({'font.size': 16})
    plt.plot(xpos, lognorm.pdf(xpos, shape, scale=scale), lw=3, label='input')
    plt.hist(diam, density=True, label='spheres')
    plt.grid()
    plt.xlabel('Size')
    plt.ylabel('Probability density function')
    plt.legend()
    plt.savefig(fname + 'Packing_histogram.png', dpi=300)
    plt.savefig(fname + 'Packing_histogram.pdf')
    if show_plot:
        plt.show()
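A usage sketch under the same assumed imports; the filename prefix and parameters are hypothetical:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import lognorm

diam = lognorm.rvs(0.3, scale=2.0, size=500)    # synthetic sphere diameters
save_csd('foam.', diam, shape=0.3, scale=2.0)   # writes foam.Packing_histogram.{png,pdf}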
Example #4
 def kinetic_dispersion(self):
     #print self.nd_param.k0_shape, self.nd_param.k0_loc, self.nd_param.k0_scale
     k0_weights=np.zeros(self.simulation_options["dispersion_bins"])
     k_start=lognorm.ppf(0.0001, self.nd_param.k0_shape, loc=self.nd_param.k0_loc, scale=self.nd_param.k0_scale)
     k_end=lognorm.ppf(0.9999, self.nd_param.k0_shape, loc=self.nd_param.k0_loc, scale=self.nd_param.k0_scale)
     k0_vals=np.linspace(k_start,k_end, self.simulation_options["dispersion_bins"])
     k0_weights[0]=lognorm.cdf(k0_vals[0], self.nd_param.k0_shape, loc=self.nd_param.k0_loc, scale=self.nd_param.k0_scale)
     for k in range(1, len(k0_weights)):
         k0_weights[k]=lognorm.cdf(k0_vals[k], self.nd_param.k0_shape, loc=self.nd_param.k0_loc, scale=self.nd_param.k0_scale)-lognorm.cdf(k0_vals[k-1], self.nd_param.k0_shape, loc=self.nd_param.k0_loc, scale=self.nd_param.k0_scale)
     #plt.plot(k0_vals, k0_weights)
     #plt.title("k0")
     #plt.show()
     return k0_vals, k0_weights
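The weights telescope to lognorm.cdf(k_end), so they sum to just under one. A standalone sketch of the same binning with hypothetical parameters:

import numpy as np
from scipy.stats import lognorm

shape, loc, scale, bins = 0.5, 0.0, 1.0, 16   # hypothetical dispersion set-up
vals = np.linspace(lognorm.ppf(1e-4, shape, loc, scale),
                   lognorm.ppf(1 - 1e-4, shape, loc, scale), bins)
weights = np.diff(lognorm.cdf(vals, shape, loc, scale), prepend=0.0)
print(weights.sum())   # ~0.9999: mass beyond the quantile cut-offs is dropped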
Example #5
def create_fragility_curve(x, filter_by):
    # NOTE: the original stub used sigma = 0, which is not a valid lognormal
    # shape (lognorm.ppf returns nan for s=0); a positive placeholder is used.
    sigma = 0.5
    mu = 0
    s = sigma
    scale = exp(mu)
    #plt.plot(x, dist.pdf(x))
    #plt.plot(x, dist.cdf(x))
    # Display the lognormal distribution:
    x = np.linspace(lognorm.ppf(0.01, s, scale=scale),
                    lognorm.ppf(0.99, s, scale=scale), 100)
    ax = plt.axes()
    ax.plot(x, lognorm.pdf(x, s, scale=scale), 'r-', lw=5, alpha=0.6,
            label='lognorm pdf')
Example #6
    def test_fa(self):
        T = 10
        q = generic.fa(self.da, T, 'lognorm')

        p0 = lognorm.fit(self.da.values[:, 0, 0])
        q0 = lognorm.ppf(1 - 1. / T, *p0)
        np.testing.assert_array_equal(q[0, 0, 0], q0)
Example #7
	def GetFirmSize(self):
		if float(self.lstDev) > 0:
			if self.distNorm==True:
				firmsize = float(norm.ppf(float(self.FindURandom()),scale=float(self.lstDev),loc=float(self.averageFirmSize)))
			else:
				firmsize = float(lognorm.ppf(float(self.FindURandom()),float(self.lstDev)/float(self.averageFirmSize),scale=float(self.averageFirmSize)))
			if math.isinf(firmsize) == True:
				firmsize = 0
				logging.info("Infinity encountered")
		else:
			firmsize = self.averageFirmSize
		
		if self.roundval=='floor':
			firmsize = np.floor(firmsize)
		elif self.roundval=='ceil':
			firmsize = np.ceil(firmsize)
		elif self.roundval=='tenths':
			firmsize = np.round(firmsize,1)
		elif self.roundval==True:
			firmsize = np.round(firmsize)
		
		if firmsize > 0:
			return firmsize
		else:
			return 0
Example #8
    def GetFirmSize(self):
        if float(self.lstDev) > 0:
            if self.distNorm == True:
                firmsize = float(
                    norm.ppf(float(self.FindURandom()),
                             scale=float(self.lstDev),
                             loc=float(self.averageFirmSize)))
            else:
                firmsize = float(
                    lognorm.ppf(float(self.FindURandom()),
                                float(self.lstDev) /
                                float(self.averageFirmSize),
                                scale=float(self.averageFirmSize)))
            if math.isinf(firmsize) == True:
                firmsize = 0
                logging.info("Infinity encountered")
        else:
            firmsize = self.averageFirmSize

        if self.roundval == 'floor':
            firmsize = np.floor(firmsize)
        elif self.roundval == 'ceil':
            firmsize = np.ceil(firmsize)
        elif self.roundval == 'tenths':
            firmsize = np.round(firmsize, 1)
        elif self.roundval == True:
            firmsize = np.round(firmsize)

        if firmsize > 0:
            return firmsize
        else:
            return 0
Example #9
 def cvar(self, alph):
     mu = self.mu
     sigma = self.sigma
     q = lognorm.ppf(alph, sigma, 0, np.exp(mu))
     a = norm.cdf((mu+sigma**2-np.log(q))/sigma)
     b = 1 - norm.cdf((np.log(q)-mu)/sigma)
     return np.exp(mu + sigma**2/2) * a/b
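The closed form is the upper-tail conditional mean E[X | X > VaR_alpha] of a lognormal; a quick Monte Carlo sanity check with arbitrary parameters:

import numpy as np
from scipy.stats import lognorm

mu, sigma, alph = 0.0, 0.5, 0.95
q = lognorm.ppf(alph, sigma, 0, np.exp(mu))
x = lognorm.rvs(sigma, 0, np.exp(mu), size=1_000_000)
print(x[x > q].mean())   # should be close to the closed-form value above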
Example #10
File: arms.py  Project: sauxpa/MAB
 def ppf(self, q):
     """
     Percentile function (inverse cumulative distribution function)
     :param q: np.ndarray, quantiles to evaluate
     :return: np.ndarray, values of the inverse CDF at q
     """
     return lognorm.ppf(q, self.mu, self.eta)
Example #11
def VaR_alpha(alpha, parametros):
    
    if(parametros[0] == "gennormal"):
        from scipy.stats import gennorm
        VaR = gennorm.ppf(alpha,parametros[1],parametros[2])
        
    elif(parametros[0] == "normal"):
        from scipy.stats import norm
        VaR = norm.ppf(alpha,parametros[1],parametros[2])
    
    elif(parametros[0] == "gamma"):
        from scipy.stats import gamma
        VaR = gamma.ppf(alpha,parametros[1],scale=parametros[2])
    
    elif(parametros[0] == "pareto"):
        from scipy.stats import pareto
        VaR = pareto.ppf(q=alpha,b=parametros[1],scale=parametros[2])
    
    elif(parametros[0] == "weibull"):
        from scipy.stats import weibull
        VaR = weibull.ppf(q=alpha,b=parametros[1],scale=parametros[2])
    
    else: #(parametros[0] == "lognorm"):
        from scipy.stats import lognorm
        VaR = lognorm.ppf(q=alpha,b=parametros[1],scale=parametros[2])
        
    return VaR
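Usage sketch: a lognormal with shape 0.5 and scale 1 (underlying mu = 0), so the 95% VaR is exp(0.5 * 1.645), about 2.28:

print(VaR_alpha(0.95, ["lognorm", 0.5, 1.0]))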
Example #12
    def _get_boundaries_through_warping(
        self, max_batch_length: int, num_quantiles: int,
    ) -> List[int]:

        # NOTE: the following lines do not cover that there is only one example in the dataset
        # warp frames (duration) distribution of train data
        logger.info("Batch quantisation in latent space")
        # linspace set-up
        num_boundaries = num_quantiles + 1
        # create latent linearly equal spaced buckets
        latent_boundaries = np.linspace(
            1 / num_boundaries, num_quantiles / num_boundaries, num_quantiles,
        )
        # get quantiles using lognormal distribution
        quantiles = lognorm.ppf(latent_boundaries, 1)
        # scale up to max_batch_length
        bucket_boundaries = quantiles * max_batch_length / quantiles[-1]
        # compute resulting bucket length multipliers
        length_multipliers = [
            bucket_boundaries[x + 1] / bucket_boundaries[x]
            for x in range(num_quantiles - 1)
        ]
        # logging
        logger.info(
            "Latent bucket boundary - buckets: {} - length multipliers: {}".format(
                list(map("{:.2f}".format, bucket_boundaries)),
                list(map("{:.2f}".format, length_multipliers)),
            )
        )
        return list(sorted(bucket_boundaries))
Example #13
def CDFm(data,
         nPoint,
         dist='normal',
         mu=0,
         sigma=1,
         analitica=False,
         lim=None):
    import numpy as np
    from scipy.interpolate import interp1d
    from statsmodels.distributions import ECDF
    from scipy.stats import norm, lognorm

    eps = 5e-5
    y = np.linspace(eps, 1 - eps, nPoint)

    if not analitica:
        ecdf = ECDF(data)
        xest = np.linspace(lim[0], lim[1], int(100e3))
        yest = ecdf(xest)
        interp = interp1d(yest, xest, fill_value='extrapolate', kind='nearest')
        x = interp(y)
    else:
        if dist == 'normal':
            x = norm.ppf(y, loc=mu, scale=sigma)
        elif dist == 'lognormal':
            x = lognorm.ppf(y, sigma, loc=0, scale=np.exp(mu))

    return x
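A usage sketch of the analytic branch (the function imports its own dependencies):

x = CDFm(None, 5, dist='lognormal', mu=0.0, sigma=0.5, analitica=True)
print(x)   # five quantiles of lognorm(s=0.5, scale=exp(0)=1)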
Example #14
def score_from_sample(lca,sobol_sample):
    
    vector = lca.tech_params['amount']

    q = (q_high-q_low)*sobol_sample[:n_normal] + q_low
    params_normal_new  = norm.ppf(q,loc=params_normal['loc'],scale=params_normal['scale'])
    np.put(vector,indices_normal,params_normal_new)
    del q

    q = sobol_sample[n_normal:n_normal+n_triang]
    loc   = params_triang['minimum']
    scale = params_triang['maximum']-params_triang['minimum']
    c     = (params_triang['loc']-loc)/scale
    params_triang_new = triang.ppf(q,c=c,loc=loc,scale=scale)
    np.put(vector,indices_triang,params_triang_new)
    del q

    #TODO implement group sampling
#     q = (q_high-q_low)*samples[:,:n_lognor] + q_low
    q = (q_high-q_low)*np.random.rand(n_lognor) + q_low
    params_lognor_new = lognorm.ppf(q,s=params_lognor['scale'],scale=np.exp(params_lognor['loc']))
    np.put(vector,indices_lognor,params_lognor_new)

    lca.rebuild_technosphere_matrix(vector)
    score = (cB*spsolve(lca.technosphere_matrix,d))[0]
        
    return score
Example #15
 def test_fa(self):
     T = 10
     q = generic.fa(self.da, T, "lognorm")
     assert "return_period" in q.coords
     p0 = lognorm.fit(self.da.values[:, 0, 0])
     q0 = lognorm.ppf(1 - 1.0 / T, *p0)
     np.testing.assert_array_equal(q[0, 0, 0], q0)
Example #16
def plot_bias():
    for sampl in np.arange(10, 45, 5):
        errs = []
        ests = []
        real_val = lognorm.ppf(0.5, 1, 0)
        for _ in range(100000):
            x = lognorm.rvs(1, 0, size=sampl)
            #est_val = estimate_median(x)
            est_val = np.median(x)
            err = (real_val - est_val) / real_val
            errs.append(err)
            ests.append(est_val)

        print(np.mean(errs))

        plt.hist(ests, bins=np.arange(0, 4, .1))
        plt.axvline(real_val, label="actual median", color="black")
        plt.axvline(np.mean(ests),
                    label="avg estimated value of median on sample size: " +
                    str(sampl),
                    color="purple")
        plt.axvline(np.median(ests),
                    label="median estimated value of median on sample size: " +
                    str(sampl),
                    color="orange")
        plt.legend()
        plt.title("Sample size = " + str(sampl))
        plt.savefig('plots/sample_' + str(sampl) + '.png')
        plt.close()
        print('processed sample size ' + str(sampl))
Example #17
 def lowerThreshold(self, distribution, specification, tail_probability) :
     if distribution == 'normal' :
         mean = specification['mean']
         std_dev = specification['std_dev']
         return norm.ppf(tail_probability, loc=mean, scale=std_dev)
     elif distribution == 'lognormal' :
         lower = specification['lower']
         scale = specification['scale']
         sigma = specification['sigma']
         return lognorm.ppf(tail_probability, sigma, loc=lower, scale=scale)
Example #18
def make_csd(shape, scale, npart, show_plot=False):
    """Create cell size distribution and save it to file."""
    if shape == 0:
        rads = [scale + 0 * x for x in range(npart)]
    else:
        rads = lognorm.rvs(shape, scale=scale, size=npart)
    with open('diameters.txt', 'w') as fout:
        for rad in rads:
            fout.write('{0}\n'.format(rad))
    if shape == 0:
        xpos = linspace(scale / 2, scale * 2, 100)
    else:
        xpos = linspace(lognorm.ppf(0.01, shape, scale=scale),
                        lognorm.ppf(0.99, shape, scale=scale), 100)
    plt.plot(xpos, lognorm.pdf(xpos, shape, scale=scale))
    plt.hist(rads, density=True)  # 'normed' was removed in matplotlib >= 3.1
    plt.savefig('packing_histogram.png')
    plt.savefig('packing_histogram.pdf')
    if show_plot:
        plt.show()
Example #20
def convert_sample_to_lognor(params,sample):
	"""
	Convert uniform in [0,1] to LOGNORMAL distribution
	"""

	#Make sure all parameters have proper distributions
	assert np.all(params['uncertainty_type'] == ID_LOGNOR)

	q = (Q_HIGH-Q_LOW)*sample + Q_LOW
	params_converted = lognorm.ppf(q,s=params['scale'],scale=np.exp(params['loc']))
	params_converted *= np.sign(params['amount'])
	return params_converted
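A hedged usage sketch; the Q_LOW/Q_HIGH/ID_LOGNOR constants and the structured params array are assumptions about the surrounding module:

import numpy as np
from scipy.stats import lognorm

Q_LOW, Q_HIGH, ID_LOGNOR = 0.001, 0.999, 2    # hypothetical module constants
params = np.array([(ID_LOGNOR, 0.0, 0.5, 1.0)],
                  dtype=[('uncertainty_type', 'i4'), ('loc', 'f8'),
                         ('scale', 'f8'), ('amount', 'f8')])
print(convert_sample_to_lognor(params, np.array([0.5])))   # median -> exp(loc) = 1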
Example #21
def Dist(stvars, value, inpt):
    v = zeros(inpt)
    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':        
            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            v[j] = uniform.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    return v
Example #22
 def plot(self, num=1000, axes=None) -> list:
     """
     A method to plot the impact
     """
     x = np.linspace(
         lognorm.ppf(0.001,
                     s=self.data["sigma"],
                     scale=np.exp(self.data["mu"])),
         lognorm.ppf(0.999,
                     s=self.data["sigma"],
                     scale=np.exp(self.data["mu"])),
         num,
     )
     plt.title("%s (PDF)" % (self.data["name"]))
     plt.ylabel("relative likelihood")
     plt.xlabel("impact")
     return plt.plot(
         x,
         lognorm.pdf(x, s=self.data["sigma"],
                     scale=np.exp(self.data["mu"])),
         axes=axes,
     )
Example #23
 def evaluate_lognormal(self, iterations: int = 1000) -> float:
     reduction = np.prod(  # np.product is a deprecated alias of np.prod
         list(
             map(
                 lambda x: x["reduction"]
                 if x["implemented"] is True else 1,
                 self.data["vulnerability"]["controls"],
             )))
     return lognorm.ppf(
         np.random.rand(iterations),
         s=self.data["impact"]["sigma"],
         scale=np.exp(self.data["impact"]["mu"]),
     ) * np.random.poisson(lam=self.data["likelihood"]["lam"] * reduction,
                           size=iterations)
Example #24
    def generateLatinHypercubeSampledMultipliers(self, specification_map, number_samples) :
            
        # Construct sets of random sampled multipliers from the selected distribution for each parameter
        multiplier_sets = {}
        for key, specification in specification_map.items() :

            # Generate stratified random probability values for distribution generation via inverse CDF
            stratified_random_probabilities = ((np.array(range(number_samples)) + np.random.random(number_samples))/number_samples)

            # Use stratified random probability values to generate stratified samples from selected distribution via inverse CDF
            distribution = specification['distribution']
            if distribution == 'uniform' :
                lower = specification['settings']['lower']
                base = specification['settings']['upper'] - lower
                multiplier_sets[key] = uniform.ppf(stratified_random_probabilities, loc=lower, scale=base).tolist()
            elif distribution == 'normal' :
                mean = specification['settings']['mean']
                std_dev = specification['settings']['std_dev']
                multiplier_sets[key] = norm.ppf(stratified_random_probabilities, loc=mean, scale=std_dev).tolist()
            elif distribution == 'triangular' :
                a = specification['settings']['a']
                base = specification['settings']['b'] - a
                c_std = (specification['settings']['c'] - a)/base
                multiplier_sets[key] = triang.ppf(stratified_random_probabilities, c_std, loc=a, scale=base).tolist()
            elif distribution == 'lognormal' :
                lower = specification['settings']['lower']
                scale = specification['settings']['scale']
                sigma = specification['settings']['sigma']
                multiplier_sets[key] = lognorm.ppf(stratified_random_probabilities, sigma, loc=lower, scale=scale).tolist()
            elif distribution == 'beta' :
                lower = specification['settings']['lower']
                base = specification['settings']['upper'] - lower
                a = specification['settings']['alpha']
                b = specification['settings']['beta']
                multiplier_sets[key] = beta.ppf(stratified_random_probabilities, a, b, loc=lower, scale=base).tolist()

        # Randomly select from sampled multiplier sets without replacement to form multipliers (dictionaries)
        sampled_multipliers = []
        for i in range(number_samples) :
            sampled_multiplier = {}
            for key, multiplier_set in multiplier_sets.items() :
                random_index = np.random.randint(len(multiplier_set))
                sampled_multiplier[key] = multiplier_set.pop(random_index)
            sampled_multipliers.append(sampled_multiplier)

        return sampled_multipliers
Example #25
def Dist(stvars, value, inpt):
    v = zeros(inpt)
    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                            stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0,
                               exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                            stvars[j].param[1], stvars[j].param[2],
                            stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            v[j] = uniform.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1])

    return v
Example #26
def solve_integral_lognormal(rho, sigma2=1):

    if rho >= 1.0:
        rho = 0.9999

    nhg = 30
    x, w = N.polynomial.hermite.hermgauss(nhg)
    Sigma = sigma2 * N.array([[1., rho], [rho, 1.]])
    Nd = 2
    const = N.pi**(-0.5 * Nd)

    # gaussian variable
    xn = N.array(list(itertools.product(*(x, ) * Nd)))

    # gauss hermite weights
    wn = N.prod(N.array(list(itertools.product(*(w, ) * Nd))), 1)

    # normalized diagonal variables
    yn = 2.0**0.5 * N.dot(N.linalg.cholesky(Sigma), xn.T).T

    #scipy gaussian cdf
    yn = norm.cdf(yn, scale=N.sqrt(sigma2))

    # lognormal ppf; s is the sigma of the underlying gaussian
    # (its mean is 0, so scale = exp(0) = 1)
    gn = lognorm.ppf(yn, s=N.sqrt(sigma2), loc=0.0, scale=1.0)

    # to have Coles and jones
    gn *= N.exp(-sigma2 / 2.)
    gn -= 1.

    gn = N.prod(gn, 1)

    if not N.all(N.isfinite(gn)):
        gn[N.where(N.isinf(gn))] = 0.
        #assert 0

    corr = N.sum((wn * const) * gn)

    return corr
Example #27
def mse():
    """Compute MSE of quantiles from the distributions.
    
    Returns:
        (dict): MSEs of `lognorm`, `gamma`, `weibull` and `erlang` in a dict.
    """
    # quantiles
    quantile_points = [.05,.25,.5,.75,.95]
    def _qMSE(q):
        return np.mean((q - np.array([2.2,3.8,5.1,6.7,11.5]))**2)
    # distributions
    distr = continuous()
    lognorm_quantiles = lognorm.ppf(quantile_points, *distr['lognorm'])
    gamma_quantiles = gamma.ppf(quantile_points, *distr['gamma'])
    weibull_quantiles = weibull.ppf(quantile_points, *distr['weibull'])
    erlang_quantiles = erlang.ppf(quantile_points, *distr['erlang'])
    # MSE
    return {
        'lognorm': _qMSE(lognorm_quantiles),
        'gamma': _qMSE(gamma_quantiles),
        'weibull': _qMSE(weibull_quantiles),
        'erlang': _qMSE(erlang_quantiles)
    }
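The helpers are not shown; a hedged sketch of the assumed setup, with continuous() returning fitted scipy parameter tuples and weibull aliasing scipy's weibull_min:

import numpy as np
from scipy.stats import lognorm, gamma, erlang, weibull_min as weibull

def continuous():
    # hypothetical fitted (shape, loc, scale) parameters per candidate
    return {
        'lognorm': (0.5, 0.0, 5.0),
        'gamma':   (4.0, 0.0, 1.3),
        'weibull': (2.3, 0.0, 6.0),
        'erlang':  (4, 0.0, 1.3),
    }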
Example #28
def gammalognorm(rho):
	l = 3
	a = 6.7
	N = 100000
	miu = np.array([0 , 0])
	corr = np.array([[1, rho],[rho, 1]])
	Z = np.random.multivariate_normal(miu, corr, N)
	U = norm.cdf(Z)
	X = gamma.ppf(U[:, 0], a, l)
	Y = lognorm.ppf(U[:, 1], 0.1, 0.05)
	corre = np.corrcoef(X,Y)

	plot1 = plt.subplot2grid((2, 2), (1, 0))
	plot2 = plt.subplot2grid((2, 2), (0, 0), colspan = 2)
	plot3 = plt.subplot2grid((2, 2), (1, 1))

	plot1.plot(X,Y, '.')
	plot2.hist(X, bins=30)
	plot3.hist(Y, bins=30, orientation = 'horizontal')
	plot2.set_title('Case 2, rho={}'.format(rho))
	plt.show()

	print("Case2, rho=", rho,"\n",corre[0][1])
Example #29
def get_baseline_lognormal(alpha, shape, technique, int_options):
    """ Get the expected returns by drawing numerous random deviates from a
    lognormal distribution.
    """
    # Guard interface.
    args = (alpha, shape, technique, int_options)
    assert basic_checks('get_baseline_lognormal', 'in', args)

    # Construct bounds based on quantiles of lognormal distribution.
    lower, upper = EPS, lognorm.ppf(0.9999999, shape)

    # Prepare wrapper for alternative integration strategies.
    func = partial(_wrapper_baseline, alpha, shape)
    # Perform naive Monte Carlo integration.
    if technique == 'naive_mc':
        # Distribute relevant integration options.
        implementation = int_options['naive_mc']['implementation']
        num_draws = int_options['naive_mc']['num_draws']
        seed = int_options['naive_mc']['seed']
        # Perform naive Monte Carlo integration.
        rslt = naive_monte_carlo(func, (lower, upper), num_draws,
                                 implementation, seed)
    elif technique == 'quad':
        # Perform integration based on quadrature.
        rslt = scipy_quad(func, (lower, upper))
    elif technique == 'romberg':
        # Perform integration based on Romberg.
        rslt = scipy_romberg(func, (lower, upper))
    else:
        # an unknown technique would otherwise leave rslt undefined
        raise NotImplementedError(technique)

    # Check result.
    assert basic_checks('get_baseline_lognormal', 'out', rslt)

    # Finishing
    return rslt
Example #30
    def solve_integral(self, rho, sigma2):

        if rho >= 1.0:
            rho = 1 - 1e-08

        nhg = 30
        x, w = N.polynomial.hermite.hermgauss(nhg)
        Sigma = sigma2 * N.array([[1., rho], [rho, 1.]])
        Nd = 2
        const = N.pi**(-0.5 * Nd)

        # gaussian variable
        xn = N.array(list(itertools.product(*(x, ) * Nd)))

        # gauss hermite weights
        wn = N.prod(N.array(list(itertools.product(*(w, ) * Nd))), 1)

        # normalized diagonal variables
        yn = 2.0**0.5 * N.dot(N.linalg.cholesky(Sigma), xn.T).T

        yn = norm.cdf(yn, loc=0., scale=N.sqrt(sigma2))

        gn = lognorm.ppf(yn, s=N.sqrt(sigma2), loc=0.0, scale=1.0)

        # Eq. 16
        gn *= N.exp(-sigma2 / 2.)
        gn -= 1.

        gn = N.prod(gn, 1)

        if not N.all(N.isfinite(gn)):
            gn[N.where(N.isinf(gn))] = 0.

        z = N.sum((wn * const) * gn, axis=0)

        return z
Example #31
def aleform():
    form = Ale_form()
    if request.method == "POST" and form.validate_on_submit():
        #obesity
        weight = float(form.weight.data)
        height = float(form.height.data)
        bmi = float(weight / height**2)
        #bmi distribution
        percentilbmi = lognorm.cdf([bmi], 0.1955, -10, 25.71)
        #value in the obesity county distr
        val_obse = lognorm.ppf([percentilbmi], 0.0099, -449.9, 474.25)
        #diabetes
        diabetes = float(form.diabetes.data)
        val_dia = lognorm.ppf([diabetes], 0.164, -7.143, 14.58)
        #smokers
        smoke = float(form.smoke.data)
        #number of cigarettes distribution
        percentilcigars = lognorm.cdf([smoke], 0.506, 0, 2.29)
        #value in the smoker county distribution
        val_smoke = lognorm.ppf([percentilcigars], 0.062, -65.19, 88.55)
        #exercise
        exercise = float(form.exercise.data)
        val_exer = lognorm.ppf([exercise], 0.105, -36.41, 62.65)
        #hsdiploma
        hsdiploma = float(form.hsdiploma.data)
        val_dip = lognorm.ppf([hsdiploma], 0.208, -11.3, 24.59)
        #poverty
        poverty = float(form.poverty.data)
        val_pov = lognorm.ppf([poverty], 0.279, -3.594, 15.76)
        out_person = [val_exer, val_obse, val_smoke, val_dia, val_pov, val_dip]
        # out_person=[35.41,39,42,17,33.7,35.4]   # lowest values
        #out_person=[8,10,7.9,1.64,3.0,1.6]    # highest values
        #out_person=[35,15,25.5,30.5,45.5,45.5] # example used while building the web form
        x_predict = np.array(out_person).reshape(1, -1)
        result = model_predict.predict(x_predict)
        result = str(result)
        #return result
        return render_template('predict_ale.html', result=result)
    # return redirect(url_for('predict_ale',out_person=out_person))
    return render_template('longevityform.html', title='LONGEVITY', form=form)
Example #32
def ppf_fn2(q):
    return lognorm.ppf(q, 1, 0)
Example #33
def SA_SOBOL(driver):
    # Uses the Sobel Method for SA.
    # Input:
    # inpt : no. of input factors
    # N: number of Sobel samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2*inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = norm.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
            value[:,j+inpt] = lognorm.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
            value[:,j+inpt] = beta.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = uniform.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        load("dmodel")
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(driver, values))
        for i in range(nSOBOL):
            YXMA[i] = next(out)
        for i in range(nSOBOL):
            YXMB[i] = next(out)
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = next(out)

    f0 = mean(YXMA,0)
    if otpt==1:
        V = cov(YXMA,None,0,1)
    else:  #multiple outputs
        V = diag(cov(YXMA,None,0,1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,i] = 1.0/nSOBOL*sum(YXMA[:,p]*YXMC[i,:,p])-f0[p]**2;
            Vci[p,i]= 1.0/nSOBOL*sum(YXMB[:,p]*YXMC[i,:,p])-f0[p]**2;

    Si = zeros((otpt,inpt));
    Sti = zeros((otpt,inpt));
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #34
def Finv_Lognormal(r, m, s):
    sln, mln = p_Lognormal(m, s)

    return lognorm.ppf(r, sln, 0, np.exp(mln))
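p_Lognormal is not shown; a plausible moment-matching sketch, mapping mean m and standard deviation s to the underlying normal's sigma and mu:

import numpy as np

def p_Lognormal(m, s):
    sln = np.sqrt(np.log(1.0 + (s / m) ** 2))   # shape: sigma of ln X
    mln = np.log(m) - 0.5 * sln ** 2            # mean of ln X (matches mean m)
    return sln, mln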
Example #35
    def __init__(
            self,
            T: float,
            rho: float,  # = discount rate
            rf: float,  # = risk-free rate
            mu: float,  # = risky rate mean
            sigma_sq: float,  # = risky rate var
            sigma: float,  # = risky sd
            gamma: float,  # = CRRA parameter
            min_num_states: int,  # = discreteness of state space
            wealth_steps: float,  # = space between wealth states
            num_actions: int,  # = discreteness of action space
            action_range: Tuple[float, float],  # = range of actions
            W_0: float  # = starting wealth
    ) -> None:
        """ Constructor
            1) initialize the variables
            2) set up the granularity of the state and action space
        """
        #set up the member variables
        self.T = T
        self.rho = rho
        self.rf = rf
        self.mu = mu
        self.sigma_sq = sigma_sq
        self.sigma = np.sqrt(sigma_sq)
        self.gamma = gamma
        self.action_range = action_range
        #self.num_states = num_states
        self.num_actions = num_actions

        #discretize the action space
        self.A_set = np.linspace(action_range[0], action_range[1], num_actions)

        #####    discretize the state space    #######
        #TODO CHANGE TO LOWER BOUND THE WEALTH ALSO
        #decide how to discretize wealth
        #cap the max wealth, function of variance and time
        #varT = self.sigma * self.T
        #sd_end = np.sqrt(varT)
        ##TRY 1
        sd_end = np.sqrt(sigma_sq) * np.sqrt(self.T)
        print("sd_end: {}".format(sd_end))
        # print("sd_end {}".format(sd_end))
        # max_wealth =  W_0 + int(4*sd_end * W_0) #KEY VARIABLE, RANGE OF WEALTH STATES
        # min_wealth = max(0,W_0 - (max_wealth - W_0))
        ##TRY2
        max_wealth = W_0 * lognorm.ppf(.9995, s=sd_end)
        min_wealth = W_0 * lognorm.ppf(.0005, s=sd_end)
        W_states_count = int(
            np.max(np.array([min_num_states, max_wealth / wealth_steps])))

        #set up state matrix
        self.S_rows = np.arange(0, T + 1)
        self.S_cols = np.linspace(min_wealth, max_wealth, W_states_count)
        #print("min wealth {} max wealth {}".format(min_wealth,max_wealth))
        #print("S_cols 1 {}".format(self.S_cols))
        self.S_value = np.zeros((self.S_rows.size, self.S_cols.size))
        self.gap = self.S_cols[1] - self.S_cols[0]
        self.max_wealth = int(self.S_cols[-1])
        self.min_wealth = int(self.S_cols[0])
        self.num_wealth_states = int(self.S_cols.size)

        #print( [(W,a) for W,a in product(self.S_cols , self.A_set) ])
        #precompute the transition probabilities
        self.trans_probs = np.array([
            np.array([self.transition_prob(0, W, a) for W in self.S_cols])
            for a in self.A_set
        ])

        # np.set_printoptions(precision=1,suppress=True)
        # print(self.trans_probs[0,0:10,0:10])
        # print(self.trans_probs[0,1,:])
        # print(self.trans_probs[0,1,:].shape)
        #print(self.trans_probs)
        #print(self.trans_probs.shape)
        #assert(self.trans_probs.shape == (self.num_actions, self.num_wealth_states))

        print("time steps: {} Max wealth: {} wealth steps: {}".format(
            self.T, max_wealth, W_states_count))
Example #36
 def generic_dispersion(self, nd_dict, GH_dict=None):
     weight_arrays = []
     value_arrays = []
     for i in range(0,
                    len(self.simulation_options["dispersion_parameters"])):
         if self.simulation_options["dispersion_distributions"][
                 i] == "uniform":
             # bounds are looked up in nd_dict, as in the other branches
             value_arrays.append(
                 np.linspace(
                     nd_dict[self.simulation_options["dispersion_parameters"]
                             [i] + "_lower"],
                     nd_dict[self.simulation_options["dispersion_parameters"]
                             [i] + "_upper"],
                     self.simulation_options["dispersion_bins"][i]))
             weight_arrays.append(
                 [1 / self.simulation_options["dispersion_bins"][i]] *
                 self.simulation_options["dispersion_bins"][i])
         elif self.simulation_options["dispersion_distributions"][
                 i] == "normal":
             param_mean = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_mean"]
             param_std = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_std"]
             if type(GH_dict) is dict:
                 param_vals = [
                     (param_std * math.sqrt(2) * node) + param_mean
                     for node in GH_dict["nodes"]
                 ]
                 param_weights = GH_dict["normal_weights"]
             else:
                 min_val = norm.ppf(1e-4, loc=param_mean, scale=param_std)
                 max_val = norm.ppf(1 - 1e-4,
                                    loc=param_mean,
                                    scale=param_std)
                 param_vals = np.linspace(
                     min_val, max_val,
                     self.simulation_options["dispersion_bins"][i])
                 param_weights = np.zeros(
                     self.simulation_options["dispersion_bins"][i])
                 param_weights[0] = norm.cdf(param_vals[0],
                                             loc=param_mean,
                                             scale=param_std)
                 param_midpoints = np.zeros(
                     self.simulation_options["dispersion_bins"][i])
                 param_midpoints[0] = norm.ppf((1e-4 / 2),
                                               loc=param_mean,
                                               scale=param_std)
                 for j in range(
                         1, self.simulation_options["dispersion_bins"][i]):
                     param_weights[j] = norm.cdf(
                         param_vals[j], loc=param_mean,
                         scale=param_std) - norm.cdf(param_vals[j - 1],
                                                     loc=param_mean,
                                                     scale=param_std)
                     param_midpoints[j] = (param_vals[j - 1] +
                                           param_vals[j]) / 2
                 param_vals = param_midpoints
             value_arrays.append(param_vals)
             weight_arrays.append(param_weights)
         elif self.simulation_options["dispersion_distributions"][
                 i] == "lognormal":
             param_loc = 0
             param_shape = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_shape"]
             param_scale = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_scale"]
             print("shape, scale", param_shape, param_scale)
             min_val = lognorm.ppf(1e-4,
                                   param_shape,
                                   loc=param_loc,
                                   scale=param_scale)
             max_val = lognorm.ppf(1 - 1e-4,
                                   param_shape,
                                   loc=param_loc,
                                   scale=param_scale)
             param_vals = np.linspace(
                 min_val, max_val,
                 self.simulation_options["dispersion_bins"][i])
             param_weights = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_weights[0] = lognorm.cdf(param_vals[0],
                                            param_shape,
                                            loc=param_loc,
                                            scale=param_scale)
             param_midpoints = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_midpoints[0] = lognorm.ppf((1e-4 / 2),
                                              param_shape,
                                              loc=param_loc,
                                              scale=param_scale)
             for j in range(1,
                            self.simulation_options["dispersion_bins"][i]):
                 param_weights[j] = lognorm.cdf(
                     param_vals[j],
                     param_shape,
                     loc=param_loc,
                     scale=param_scale) - lognorm.cdf(param_vals[j - 1],
                                                      param_shape,
                                                      loc=param_loc,
                                                      scale=param_scale)
                 param_midpoints[j] = (param_vals[j - 1] +
                                       param_vals[j]) / 2
             value_arrays.append(param_midpoints)
             weight_arrays.append(param_weights)
         elif self.simulation_options["dispersion_distributions"][
                 i] == "skewed_normal":
             param_mean = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_mean"]
             param_std = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_std"]
             param_skew = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_skew"]
             min_val = skewnorm.ppf(1e-4,
                                    param_skew,
                                    loc=param_mean,
                                    scale=param_std)
             max_val = skewnorm.ppf(1 - 1e-4,
                                    param_skew,
                                    loc=param_mean,
                                    scale=param_std)
             param_vals = np.linspace(
                 min_val, max_val,
                 self.simulation_options["dispersion_bins"][i])
             param_weights = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_weights[0] = skewnorm.cdf(param_vals[0],
                                             param_skew,
                                             loc=param_mean,
                                             scale=param_std)
             param_midpoints = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_midpoints[0] = skewnorm.ppf((1e-4 / 2),
                                               param_skew,
                                               loc=param_mean,
                                               scale=param_std)
             for j in range(1,
                            self.simulation_options["dispersion_bins"][i]):
                 param_weights[j] = skewnorm.cdf(
                     param_vals[j],
                     param_skew,
                     loc=param_mean,
                     scale=param_std) - skewnorm.cdf(param_vals[j - 1],
                                                     param_skew,
                                                     loc=param_mean,
                                                     scale=param_std)
                 param_midpoints[j] = (param_vals[j - 1] +
                                       param_vals[j]) / 2
             value_arrays.append(param_midpoints)
             weight_arrays.append(param_weights)
         elif self.simulation_options["dispersion_distributions"][
                 i] == "log_uniform":
             param_upper = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_logupper"]
             param_lower = nd_dict[
                 self.simulation_options["dispersion_parameters"][i] +
                 "_loglower"]
             min_val = loguniform.ppf(1e-4,
                                      param_lower,
                                      param_upper,
                                      loc=0,
                                      scale=1)
             max_val = loguniform.ppf(1 - 1e-4,
                                      param_lower,
                                      param_upper,
                                      loc=0,
                                      scale=1)
             param_vals = np.linspace(
                 min_val, max_val,
                 self.simulation_options["dispersion_bins"][i])
             param_weights = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_weights[0] = loguniform.cdf(min_val,
                                               param_lower,
                                               param_upper,
                                               loc=0,
                                               scale=1)
             param_midpoints = np.zeros(
                 self.simulation_options["dispersion_bins"][i])
             param_midpoints[0] = loguniform.ppf((1e-4) / 2,
                                                 param_lower,
                                                 param_upper,
                                                 loc=0,
                                                 scale=1)
             for j in range(1,
                            self.simulation_options["dispersion_bins"][i]):
                 param_weights[j] = loguniform.cdf(
                     param_vals[j],
                     param_lower,
                     param_upper,
                     loc=0,
                     scale=1) - loguniform.cdf(param_vals[j - 1],
                                               param_lower,
                                               param_upper,
                                               loc=0,
                                               scale=1)
                 param_midpoints[j] = (param_vals[j - 1] +
                                       param_vals[j]) / 2
             value_arrays.append(param_midpoints)
             weight_arrays.append(param_weights)
     total_len = np.prod(self.simulation_options["dispersion_bins"])
     weight_combinations = list(itertools.product(*weight_arrays))
     value_combinations = list(itertools.product(*value_arrays))
     sim_params = copy.deepcopy(
         self.simulation_options["dispersion_parameters"])
     for i in range(0, len(sim_params)):
         if sim_params[i] == "E0":
             sim_params[i] = "E_0"
         if sim_params[i] == "k0":
             sim_params[i] = "k_0"
     return sim_params, value_combinations, weight_combinations
Example #37
def mapping_beta_lognorm(arr):
    from scipy.stats import lognorm
    return 1.-lognorm.ppf(1.-np.array(arr),0.7)
Example #38
    def __init__(self, a, b, n, name, pa=0.1, pb=0.9, lognormal=False, Plot=True):

        mscale.register_scale(ProbitScale)

        if Plot:
            fig = plt.figure(facecolor="white")
            ax1 = fig.add_subplot(121, axisbelow=True)
            ax2 = fig.add_subplot(122, axisbelow=True)
            ax1.set_xlabel(name)
            ax1.set_ylabel("ECDF and Best Fit CDF")
            prop = matplotlib.font_manager.FontProperties(size=8)

        if lognormal:

            sigma = (log(b) - log(a)) / ((erfinv(2 * pb - 1) - erfinv(2 * pa - 1)) * (2 ** 0.5))
            mu = log(a) - erfinv(2 * pa - 1) * sigma * (2 ** 0.5)
            cdf = arange(0.001, 1.000, 0.001)
            ppf = [lognorm.ppf(v, sigma, scale=exp(mu)) for v in cdf]

            x = lognorm.rvs(sigma, scale=exp(mu), size=n)
            x.sort()

            print "generating lognormal %s, p50 %0.3f, size %s" % (name, exp(mu), n)
            x_s, ecdf_x = ecdf(x)

            best_fit = lognorm.cdf(x, sigma, scale=exp(mu))
            if Plot:
                ax1.set_xscale("log")
                ax2.set_xscale("log")
            hist_y = lognorm.pdf(x_s, std(log(x)), scale=exp(mu))

        else:

            sigma = (b - a) / ((erfinv(2 * pb - 1) - erfinv(2 * pa - 1)) * (2 ** 0.5))
            mu = a - erfinv(2 * pa - 1) * sigma * (2 ** 0.5)
            cdf = arange(0.001, 1.000, 0.001)
            ppf = [norm.ppf(v, mu, scale=sigma) for v in cdf]

            print "generating normal %s, p50 %0.3f, size %s" % (name, mu, n)
            x = norm.rvs(mu, scale=sigma, size=n)
            x.sort()
            x_s, ecdf_x = ecdf(x)
            best_fit = norm.cdf((x - mean(x)) / std(x))
            hist_y = norm.pdf(x_s, loc=mean(x), scale=std(x))
            pass

        if Plot:
            ax1.plot(ppf, cdf, "r-", linewidth=2)
            ax1.set_yscale("probit")
            ax1.plot(x_s, ecdf_x, "o")

            ax1.plot(x, best_fit, "r--", linewidth=2)

            n, bins, patches = ax2.hist(x, density=True, facecolor="green", alpha=0.75)
            bincenters = 0.5 * (bins[1:] + bins[:-1])
            ax2.plot(x_s, hist_y, "r--", linewidth=2)
            ax2.set_xlabel(name)
            ax2.set_ylabel("Histogram and Best Fit PDF")
            ax1.grid(True, which="both", color="black", linestyle="-", linewidth=1)
            # ax1.grid(b=True, which='major', color='black', linestyle='--')
            ax2.grid(True)

        return
Example #39
def SA_FAST(driver):
    
    # First order indicies for a given model computed with Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients.
    # I-III Theory/Applications/Analysis The Journal of Chemical Physics
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'
    
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------
    
    #
    MI = 4#: maximum number of fourier coefficients that may be retained in
    # calculating the partial variances without interferences between the assigned frequencies
    #
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt-1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1,N+1) - N-1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):    
        if stvars[j].dist == 'NORM':
            X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])        
        elif stvars[j].dist == 'LNORM':        
            X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))        
    if krig == 1:            
        load("dmodel")            
        Y = predictor(X, dmodel)            
    else:
        values = []            
        for p in range(N):
#            print 'Running simulation on test',p+1,'of',N
#            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))# initially zero
    BC = zeros((N, otpt))# initially zero
#    q = int(N / 2)-1
    q = (N - 1) // 2  # N is odd, so this is an exact integer index
    for j in range(2,N+1,2):    # j is even
#        print "Y[q]",Y[q]
#        print "matrix(cos(pi * j * arange(1,q+) / N))",matrix(cos(pi * j * arange(1,q+1) / N))
#        print "matrix(Y[q + arange(0,q)] + Y[q - arange(0,q)])",matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)])
        AC[j-1] = 1.0 / N * matrix(Y[q] + matrix(cos(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)]))
    for j in range(1,N+1,2):    # j is odd
        BC[j-1] = 1.0 / N * matrix(sin(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] - Y[q - arange(1,q+1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt,otpt,inpt));
    for i in range(inpt):    
        Vi = zeros((otpt, otpt))    
        for j in range(1,MI+1): 
            idx = j * OM[i]-1     
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------
    
    Sti = []# appears right after the call to this method in the original PCC_Computation.m
    
#    if plotf == 1:    
#        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):        
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #40
def SA_EFAST(driver):

    #[SI,STI] = EFAST(K,WANTEDN)
    # First order and total effect indices for a given model computed with
    # Extended Fourier Amplitude Sensitivity Test (EFAST).
    # Andrea Saltelli, Stefano Tarantola and Karen Chan. 1999
    # A quantitative model-independent method for global sensitivity analysis of model output.
    # Technometrics 41:39-56
    #
    # Input:
    # inpt : no. of input factors
    # WANTEDN : wanted no. of sample points
    #
    # Output:
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of inpt frequencies
    # OMI : frequency for the group of interest
    # OMCI[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output
    # N : no. of runs on each curve

    # ----------------------  Setup  ---------------------------
    methd = 'EFAST'
    method = '10'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------

    NR = 1  # no. of search curves
    MI = 4  # max number of Fourier coefficients that may be retained in calculating
            # the partial variances without interference between the assigned frequencies
    # Computation of the frequency for the group of interest OMi and the no. of sample points N.
    OMi = int(floor((nEFAST / NR - 1) / (2 * MI) / inpt))
    N = 2 * MI * OMi + 1
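    # The highest retained harmonic is MI*OMi, so N = 2*MI*OMi + 1 odd sample
    # points per curve are needed to resolve it without aliasing (Nyquist).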
    total_sims = N*NR*inpt
    sim = 0
    if N * NR < 65:
        logging.error('sample size must be >= 65 per factor.')
        raise ValueError('sample size must be >= 65 per factor.')

    # Algorithm for selecting the set of frequencies. OMci(i), i=1:inpt-1, contains
    # the set of frequencies to be used by the complementary group.
    OMci = SETFREQ(N - 1, OMi / 2 / MI)
    # Loop over the inpt input factors.
    Si = zeros((otpt, otpt, inpt))
    Sti = zeros((otpt, otpt, inpt))
    for i in range(inpt):
        # Initialize AV,AVi,AVci to zero.
        AV = 0
        AVi = 0
        AVci = 0
        # Loop over the NR search curves.
        for L in range(NR):
            # Setting the vector of frequencies OM for the inpt factors.
            cj = 1
            OM = zeros(inpt)
            for j in range(inpt):
                if (j == i):
                    # For the factor of interest.
                    OM[i] = OMi
                else:
                    # For the complementary group.
                    OM[j] = OMci[cj]
                    cj = cj + 1
            # Setting the relation between the scalar variable S and the coordinates
            # {X(1),X(2),...X(inpt)} of each sample point.
            FI = zeros(inpt)
            for j in range(inpt):
                FI[j] = random.random() * 2 * pi        # random phase shift
            S_VEC = pi * (2 * arange(1,N+1) - N - 1) / N
            OM_VEC = OM[range(inpt)]
            FI_MAT = transpose(array([FI]*N))
            ANGLE = matrix(OM_VEC).T*matrix(S_VEC) + matrix(FI_MAT)
            X = 0.5 + arcsin(sin(ANGLE.T)) / pi
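            # arcsin(sin(.)) is a triangle wave, so each column of X sweeps
            # [0, 1] uniformly as S_VEC spans (-pi, pi]: the EFAST search curve.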
            # Transform distributions from standard uniform to general.

            for j in range(inpt):
                if stvars[j].dist == 'NORM':
                    X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
                elif stvars[j].dist == 'LNORM':
                    X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
                elif stvars[j].dist == 'BETA':
                    X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
                elif stvars[j].dist == 'UNIF':
                    X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

            # Do the N model evaluations.
            Y = zeros((N, otpt))
            if krig == 1:
                load("dmodel")
                Y = predictor(X, dmodel)
            else:
                values = []
                for p in range(N):
#                    sim += 1
#                    print 'Running simulation on test',sim,'of',total_sims
#                    Y[p] = run_model(driver, array(X[p])[0])
                    values.append(array(X[p])[0])
                Y = run_list(driver, values)

            # Subtract the average value.
            Y = Y - kron(mean(Y,0), ones((N, 1)))

            # Fourier coeff. at [1:OMi/2].
            NQ = int(N / 2)-1
            N0 = NQ + 1
            COMPL = 0
            Y_VECP = Y[N0+1:] + Y[NQ::-1]
            Y_VECM = Y[N0+1:] - Y[NQ::-1]
#            AC = zeros((int(ceil(OMi / 2)), otpt))
#            BC = zeros((int(ceil(OMi / 2)), otpt))
            AC = zeros((OMi * MI, otpt))
            BC = zeros((OMi * MI, otpt))
            for j in range(int(ceil(OMi / 2))+1):
                ANGLE = (j+1) * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j] = (Y[N0] + matrix(C_VEC) * matrix(Y_VECP)) / N
                BC[j] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j]).T * matrix(AC[j]) + matrix(BC[j]).T * matrix(BC[j])
            # Computation of V_{(ci)}.
            Vci = 2 * COMPL
            AVci = AVci + Vci
            # Fourier coeff. at [P*OMi, for P=1:MI].
            COMPL = 0
# Do these need to be recomputed at all?
#            Y_VECP = Y[N0 + range(NQ)] + Y[N0 - range(NQ)]
#            Y_VECM = Y[N0 + range(NQ)] - Y[N0 - range(NQ)]
            for j in range(OMi, OMi * MI + 1, OMi):
                ANGLE = j * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j-1] = (Y[N0] + matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j-1] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j-1]).T * matrix(AC[j-1]) + matrix(BC[j-1]).T * matrix(BC[j-1])
            # Computation of V_i.
            Vi = 2 * COMPL
            AVi = AVi + Vi
            # Computation of the total variance in the time domain.
            AV = AV + matrix(Y).T * matrix(Y) / N
        # Computation of sensitivity indices.
        AV = AV / NR
        AVi = AVi / NR
        AVci = AVci / NR
        Si[:, :, i] = AVi / AV
        Sti[:, :, i] = 1 - AVci / AV

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

#    if plotf == 1:
#        piecharts(inpt, otpt, Si, Sti, methd, output)
    if simple == 1:
        Si_t = zeros((inpt, otpt))
        Sti_t = zeros((inpt, otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
            Sti_t[p] = diag(Sti[:, :, p])
        Si = Si_t.T
        Sti = Sti_t.T
    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
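
# The per-factor loops above are inverse-transform sampling: a U(0,1) draw is
# pushed through the target distribution's ppf (uniform.cdf(u, 0, 1) is just u
# clipped to [0, 1]). A standalone sketch with illustrative parameter values:
import numpy as np
from scipy.stats import norm, lognorm, uniform

u = np.random.default_rng(1).uniform(size=5)
x_norm = norm.ppf(u, 10.0, 2.0)                # NORM: param[0]=mu, param[1]=sigma
x_lnorm = lognorm.ppf(u, 0.5, 0, np.exp(1.0))  # LNORM: s=sigma, scale=exp(mu)
x_unif = uniform.ppf(u, 0.0, 4.0)              # UNIF: loc=0, width=4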
Example #41
0
  def gen_obs(self):
    # Allocate separately; chained assignment (a = b = np.empty(...)) would
    # alias every name to one buffer. sProb/cProb are indexed [state][segment].
    uMean, uStdDev = np.empty(self.nSeg), np.empty(self.nSeg)
    cMean, cStdDev = np.empty(self.nSeg), np.empty(self.nSeg)
    pUnc = np.empty(self.nSeg)
    sProb, cProb = np.empty((4, self.nSeg)), np.empty((4, self.nSeg))
    sVeh = np.empty(self.nObs)
    tVeh = np.empty((self.nSeg, self.nObs))
    times = [[] for i in xrange(self.nSeg)]
    totals = []

    uMean, uStdDev = self.get_means_stdv()
#Randomize

    # for k in xrange(1,nSeg+1):
      # 'Read the means and standard deviations for uncongested and congested times'
      # uMean(k) = Worksheets("Route").Cells(3, 3 + k).Value
      # uStdDev(k) = Worksheets("Route").Cells(4, 3 + k).Value
      # cMean(k) = Worksheets("Route").Cells(5, 3 + k).Value
      # cStdDev(k) = Worksheets("Route").Cells(6, 3 + k).Value
      # 'Read the state transitions'
      # sProb(k, 1) = Worksheets("Route").Cells(8, 3 + k).Value
      # cProb(k, 1) = sProb(k, 1)
      # sProb(k, 2) = Worksheets("Route").Cells(9, 3 + k).Value
      # cProb(k, 2) = cProb(k, 1) + sProb(k, 2)
      # sProb(k, 3) = Worksheets("Route").Cells(10, 3 + k).Value
      # cProb(k, 3) = cProb(k, 2) + sProb(k, 3)
      # sProb(k, 4) = Worksheets("Route").Cells(11, 3 + k).Value
      # cProb(k, 4) = cProb(k, 3) + sProb(k, 4)
      # pUnc(k) = cProb(k, 2)


    # Initialize the states of the vehicles using the cumulative state
    # probabilities of the first segment (column 0 of cProb).
    for j in xrange(self.nObs):
      srv = random.uniform(0, 1)  # state selection random variable
      if srv < cProb[0][0]:
        sVeh[j] = 1
      elif srv < cProb[1][0]:
        sVeh[j] = 2
      elif srv < cProb[2][0]:
        sVeh[j] = 3
      else:
        sVeh[j] = 4
  
    for j in xrange(self.nObs):
      rvu = random.uniform(0, 1)  # uncongested travel rate - driver type
      tTot = 0.
      for k in xrange(self.nSeg):
        if sVeh[j] <= 2: 
          rvt = rvu
          mean = uMean[k]
          StdDev = uStdDev[k]
        else:
          rvt = random.uniform(0, 1)
          mean = cMean[k]
          StdDev = cStdDev[k]  # congested-state parameters, read like the worksheet block above

        tSeg = lognorm.ppf(rvt, s=StdDev, scale=np.exp(mean))
        times[k].append(tSeg)
        tVeh[k][j] = tSeg
        tTot = tTot + tSeg
        # Update the state of the vehicle before the next segment.
        if k < self.nSeg - 1:
          if sVeh[j] == 1 or sVeh[j] == 3:
            srv = 0.5 * random.uniform(0, 1)  # state selection random variable
            if srv < cProb[0][k+1]:
              sVeh[j] = 1
            else:
              sVeh[j] = 2
          else:  # sVeh[j] == 2 or sVeh[j] == 4
            srv = 0.5 + 0.5 * random.uniform(0, 1)  # state selection random variable
            if srv < cProb[2][k+1]:
              sVeh[j] = 3
            else:
              sVeh[j] = 4
      totals.append(tTot)

    self.tTimes = times
    self.tTotals = totals
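
# Hypothetical check of the segment-time parameterization used above: with
# s=sigma and scale=exp(mu), the log-normal median is exp(mu), so mean and
# StdDev are log-space parameters, not the travel-time mean and deviation.
import numpy as np
from scipy.stats import lognorm

mu, sigma = np.log(45.0), 0.25  # assumed log-space parameters (seconds)
assert np.isclose(lognorm.ppf(0.5, s=sigma, scale=np.exp(mu)), 45.0)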
Example #42
0
def lognorm_trunc(p, low, up, mu, sig):
    cdf2 = lognorm.cdf(up, sig, loc=0, scale=np.exp(mu))
    cdf1 = lognorm.cdf(low, sig, loc=0, scale=np.exp(mu))
    pc = p * (cdf2 - cdf1) + cdf1
    dt = lognorm.ppf(pc, sig, loc=0, scale=np.exp(mu))
    return dt
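
# Hypothetical usage sketch: draw 10,000 samples from a log-normal with median
# exp(mu) = 8 truncated to [2, 20], then confirm the bounds hold.
import numpy as np

rng = np.random.default_rng(0)
p = rng.uniform(size=10000)
x = lognorm_trunc(p, 2.0, 20.0, np.log(8.0), 0.6)
assert x.min() >= 2.0 and x.max() <= 20.0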
Example #43
0
def UP_MCS(driver):
    # Uses the MCS method for UP

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
#        for i in range(nMCS):
#            print 'Running simulation',i+1,'of',nMCS,'with inputs',value[i]
#            out[i] = run_model(driver, value[i])
        out = run_list(driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0], ones(nMCS))).reshape(otpt,nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1], ones(nMCS))).reshape(otpt,nMCS).transpose()
    B = logical_and(greater_equal(out, limstate1), less_equal(out, limstate2))
    PCC = sum(B, 0) / float(nMCS)
    B_t = B[sum(B, 1) == otpt]
    if otpt > 1 and 0 not in PCC[0:otpt]:
        PCC = append(PCC, len(B_t) / float(nMCS))

    #Moments
    CovarianceMatrix = matrix(cov(out, None, 0))
    Moments = {'Mean': mean(out,0), 'Variance': diag(CovarianceMatrix), 'Skewness': skew(out), 'Kurtosis': kurtosis(out,fisher=False)}


    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    C_Y = [0]*otpt
    for k in range(0,otpt):
        if Moments['Variance'][k]!=0:
            C_Y[k] = estimate_complexity.with_samples(out[:,k],nMCS)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
               'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'PCC': PCC}

    return Results
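
# A minimal sketch of the PCC step above on toy data (names illustrative):
# PCC[k] is the fraction of runs where output k stays inside its limit state;
# the appended joint term is the fraction where all outputs do at once.
import numpy as np

out = np.random.default_rng(2).normal(size=(1000, 2))  # toy model output
limstate = np.array([[-1.0, 1.0], [-2.0, 2.0]])        # [lower, upper] per output
B = (out >= limstate[:, 0]) & (out <= limstate[:, 1])
PCC = B.mean(axis=0)                                   # per-output coverage
PCC = np.append(PCC, B.all(axis=1).mean())             # joint coverage
print(PCC)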