Example #1
def inverse_hyper(hyper_prob):
	pr_gamma, pr_lnc, pr_logsm = hyper_prob

	gamma = norm.ppf(pr_gamma, 1., 1.)
	lnc = uniform.ppf(pr_lnc, -3., 6.)
	logsm2 = uniform.ppf(pr_logsm, -4., 6.)

	hyper = np.array([ gamma, np.exp(lnc), (10.**logsm2)**0.5 ])	

	return hyper
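
These prior transforms all lean on the same fact: uniform.ppf(p, loc, scale) maps a probability p in [0, 1] linearly onto [loc, loc + scale]. A minimal sketch, reusing the loc/scale of the lnc line above (the probe values 0.0/0.5/1.0 are illustrative):

from scipy.stats import uniform

assert uniform.ppf(0.0, -3., 6.) == -3.0  # left edge: loc
assert uniform.ppf(0.5, -3., 6.) == 0.0   # midpoint of [-3, 3]
assert uniform.ppf(1.0, -3., 6.) == 3.0   # right edge: loc + scale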
Example #2
def inverse_hyper(hyper_prob):
	prob_C0, prob_power, prob_sigma, prob_trans = \
	hyper_prob[0], hyper_prob[1:1+n_pop], hyper_prob[1+n_pop:1+2*n_pop], hyper_prob[1+2*n_pop:3*n_pop]
	
	C0 = np.exp( uniform.ppf(prob_C0,-3.,6.) )
	power = norm.ppf(prob_power, 0.,5.)
	sigma = 10.**( uniform.ppf(prob_sigma, -2., 2.) )
	trans = np.sort( 10.**( uniform.ppf(prob_trans, -4., 10.) ) ) # sort

	hyper = np.hstack(( C0, power, sigma, trans ))

	return hyper
Example #3
def inverse_hyper(hyper_prob):
	prob_C0, prob_slope, prob_sigma, prob_trans = \
	hyper_prob[0], hyper_prob[1:1+n_pop], hyper_prob[1+n_pop:1+2*n_pop], hyper_prob[1+2*n_pop:3*n_pop]
	
	C0 = uniform.ppf(prob_C0,-1.,2.)
	slope = norm.ppf(prob_slope, 0.,5.)
	sigma = 10.**( uniform.ppf(prob_sigma, -3., 5.) )
	trans = np.sort( uniform.ppf(prob_trans, -4., 10.) ) # sort

	hyper = np.hstack(( C0, slope, sigma, trans ))

	return hyper
Example #4
def calculate_r(h_prop, pi_prop, gamma_prop, h, pi, gamma):
    # Metropolis-Hastings accept/reject step. Relies on module-level globals:
    # genotype, beta, mu, tau and p (the total number of predictors).

    #gamma: Bernoulli(pi) prior on the inclusion indicators
    g = pi**gamma.sum() * (1 - pi)**(p - gamma.sum())
    g_prop = pi_prop**gamma.sum() * (1 - pi_prop)**(p - gamma.sum())

    #y current
    G = genotype.iloc[:, 4:984]

    Q = G.T.dot(beta).reset_index()
    Q = Q.drop(columns='index')

    mean = np.array(mu + Q).ravel()
    cov = np.identity(980) * (1 / tau)
    ys = np.random.multivariate_normal(mean, cov)
    y = np.prod(norm.pdf(ys))

    #proposed
    gamma_df = pd.DataFrame(data=gamma_prop)
    gamma_df.loc[gamma == 1] = beta
    beta_prop = gamma_df

    Q_prop = G.T.dot(beta_prop).reset_index()
    Q_prop = Q_prop.drop(columns='index')

    mean_prop = np.array(mu + Q_prop).ravel()
    ys = np.random.multivariate_normal(mean_prop, cov)
    y_prop = np.prod(norm.pdf(ys))

    # Acceptance ratio: proposed state over current state; the denominator
    # mirrors the numerator with the current-state quantities. (For a standard
    # uniform, ppf is the identity on [0, 1], so uniform.ppf(h) == h.)
    r = (uniform.ppf(h_prop) * uniform.ppf(pi_prop) * g_prop * y_prop) / \
        (uniform.ppf(h) * uniform.ppf(pi) * g * y)

    # uniform draw for the accept/reject decision
    u = np.random.uniform()
    if u < r:
        h = h_prop
        pi = pi_prop
        gamma = gamma_prop

    return h, pi, gamma
Example #5
    def univariate_make_normal(self, uni_data, extension, precision):
        """
        Takes univariate data and transforms it to have approximately normal dist
        We do this through the simple composition of a histogram equalization
        producing an approximately uniform distribution and then the inverse of the
        normal CDF. This will produce approximately gaussian samples.
        Parameters
        ----------
        uni_data : ndarray
        The univariate data [Sx1] where S is the number of samples in the dataset
        extension : float
        Extend the marginal PDF support by this amount.
        precision : int
        The number of points in the marginal PDF

        Returns
        -------
        uni_gaussian_data : ndarray
        univariate gaussian data
        params : dictionary
        parameters of the transform. We save these so we can invert them later
        """
        data_uniform, params = self.univariate_make_uniform(
            uni_data.T, extension, precision)
        if self.base == "gauss":
            return norm.ppf(data_uniform).T, params
        elif self.base == "uniform":
            return uniform.ppf(data_uniform).T, params
        else:
            raise ValueError(f"Unrecognized base dist: {self.base}.")
Example #6
    def test_interval(self):
        # open interval should have logp of 0
        interval = Interval()
        assert_equal(interval.logp(0), 0)

        # semi closed interval
        interval.ub = 1000
        assert_equal(interval.logp(0), 0)
        assert_equal(interval.logp(1001), -np.inf)

        # you should be able to send in multiple values
        assert_equal(
            interval.logp(np.array([1.0, 1002.0])), np.array([0, -np.inf])
        )

        # fully closed interval
        interval.lb = -1000
        assert_equal(interval.logp(-1001), -np.inf)
        assert_equal(interval.lb, -1000)
        assert_equal(interval.ub, 1000)
        assert_equal(interval.logp(0), np.log(1 / 2000.0))

        # you should be able to send in multiple values
        assert_equal(
            interval.logp(np.array([1.0, 2.0])),
            np.array([np.log(1 / 2000.0)] * 2),
        )

        # try and set lb higher than ub
        interval.lb = 1002
        assert_equal(interval.lb, 1000)
        assert_equal(interval.ub, 1002)

        # if val is outside closed range then rvs is used
        vals = interval.valid(np.linspace(990, 1005, 100))
        assert_(np.max(vals) <= 1002)
        assert_(np.min(vals) >= 1000)
        assert_(np.isfinite(interval.logp(vals)).all())

        # if bounds are semi-open then val is reflected from lb
        interval.ub = None
        interval.lb = 1002
        x = np.linspace(990, 1001, 10)
        vals = interval.valid(x)
        assert_almost_equal(vals, 2 * interval.lb - x)
        assert_equal(interval.valid(1003), 1003)

        # if bounds are semi-open then val is reflected from ub
        interval.lb = None
        interval.ub = 1002
        x = np.linspace(1003, 1005, 10)
        vals = interval.valid(x)
        assert_almost_equal(vals, 2 * interval.ub - x)
        assert_equal(interval.valid(1001), 1001)

        # ppf for Interval
        interval.lb = -10.0
        interval.ub = 10.0
        rando = np.random.uniform(size=10)
        assert_equal(interval.invcdf(rando), uniform.ppf(rando, -10, 20))
Example #7
def inverse_hyper(hyper_prob):
	prob_C0, prob_slope, prob_sigma, prob_trans = \
	hyper_prob[0], hyper_prob[1:1+n_pop], hyper_prob[1+n_pop:1+2*n_pop], hyper_prob[1+2*n_pop:3*n_pop]
	
	C0 = uniform.ppf(prob_C0,-1.,2.)
	slope = norm.ppf(prob_slope, 0.,5.)
	sigma = 10.**( uniform.ppf(prob_sigma, -3., 3.) )
	#trans = np.sort( uniform.ppf(prob_trans, m_min, m_max-m_min) ) # sort
	if (np.sort(prob_trans)==prob_trans).all():
		trans = uniform.ppf(prob_trans, m_min, m_max-m_min)
	else:
		trans = np.zeros(3)

	hyper = np.hstack(( C0, slope, sigma, trans ))

	return hyper
Example #8
def _r_func(q_star, w, cfg, delta):

    return uniform.ppf(
        1 - (q_star / w),
        cfg['mean'],
        cfg['std_deviation'] - cfg['mean'],
    )
Example #9
def qunif(p, minimum=0,maximum=1):
    """
    Calculates the quantile function of the uniform distribution
    """
    from scipy.stats import uniform
    result=uniform.ppf(q=p,loc=minimum,scale=maximum-minimum)
    return result
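
A hypothetical call, following the R-style qunif signature this helper emulates:

print(qunif(0.5, minimum=0, maximum=10))  # 5.0, the median of Uniform(0, 10)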
Example #10
def inverse_hyper(hyper_prob):
	prob_C0, prob_slope, prob_sigma, prob_trans = \
	hyper_prob[0], hyper_prob[1:1+n_pop], hyper_prob[1+n_pop:1+2*n_pop], hyper_prob[1+2*n_pop:3*n_pop]
	
	C0 = uniform.ppf(prob_C0,-1.,2.)
	slope = norm.ppf(prob_slope, 0.,5.)
	sigma = 10.**( uniform.ppf(prob_sigma, -3., 3.) )
	#trans = np.sort( uniform.ppf(prob_trans, m_min, m_max-m_min) ) # sort
	trans1 = uniform.ppf(prob_trans[0], m_min, m_max-m_min)
	trans2 = uniform.ppf(prob_trans[1], trans1, m_max-trans1)
	trans3 = uniform.ppf(prob_trans[2], trans2, m_max-trans2)
	trans = np.array([trans1, trans2, trans3])
	
	hyper = np.hstack(( C0, slope, sigma, trans ))

	return hyper
Example #11
def inverse_hyper(hyper_prob):
    prob_C0, prob_slope, prob_sigma, prob_trans = (
        hyper_prob[0],
        hyper_prob[1 : 1 + n_pop],
        hyper_prob[1 + n_pop : 1 + 2 * n_pop],
        hyper_prob[1 + 2 * n_pop : 3 * n_pop],
    )

    C0 = uniform.ppf(prob_C0, -1.0, 2.0)
    slope = norm.ppf(prob_slope, 0.0, 5.0)
    sigma = 10.0 ** (uniform.ppf(prob_sigma, -3.0, 5.0))
    trans = np.sort(uniform.ppf(prob_trans, -4.0, 10.0))

    hyper = np.hstack((C0, slope, sigma, trans))

    return hyper
Example #12
    def _get_params_from_lh(self, sample_size):
        """
        Based on the Latin Hypercube Sampling strategy first introduced in:
        McKay, M.D., Beckman, R.J., Conover, W.J. (1979) A Comparison of Three Methods for Selecting Values of Input
        Variables in the Analysis of Output from a Computer Code. Technometrics 21, 239. https://doi.org/10.2307/1268522
        """
        # get the limits for the uniform distributions of each parameter
        bounds = np.asarray(
            [[self.model.parameters.ranges[p][0], self.model.parameters.ranges[p][1]] for p in self.param_names],
            dtype=np.float64
        )

        # get number of parameters
        nb_params = len(self.param_names)

        # create a matrix of random values
        random_matrix = np.random.rand(sample_size, nb_params)

        # randomly permute the segment to be used for each parameter
        sampling_plan = np.zeros((sample_size, nb_params), dtype=np.float64)
        for p in range(nb_params):
            sampling_plan[:, p] = np.random.permutation(sample_size)

        # move away from (randomly selected) segment lower bound by a random value
        sampling_plan += random_matrix
        # standardise values to get values between 0 and 1
        sampling_plan /= sample_size

        # use sampling plan and inverse cumulative distribution function (CDF) of uniform dist. to get parameter values
        parameters = np.zeros((sample_size, nb_params), dtype=np.float64)
        for p in range(nb_params):
            parameters[:, p] = uniform.ppf(sampling_plan[:, p], bounds[p][0], bounds[p][1] - bounds[p][0])

        # return the matrix containing the sample of parameter sets
        return parameters
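
The same Latin Hypercube recipe, condensed into a self-contained sketch (the bounds array is a made-up placeholder, not taken from the class above):

import numpy as np
from scipy.stats import uniform

rng = np.random.default_rng(0)
sample_size, nb_params = 5, 2
bounds = np.array([[0.0, 1.0], [10.0, 20.0]])  # placeholder [min, max] per parameter

# one random permutation of the strata per parameter, jittered within each stratum
plan = np.stack([rng.permutation(sample_size) for _ in range(nb_params)], axis=1)
plan = (plan + rng.random((sample_size, nb_params))) / sample_size

# the inverse uniform CDF maps the [0, 1) plan onto each parameter's range
samples = uniform.ppf(plan, loc=bounds[:, 0], scale=bounds[:, 1] - bounds[:, 0])
print(samples)  # one stratified draw per row, one parameter per column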
Example #13
def uniform_break_points(no_intervals, start, end):
    """
    Return uniformly distributed break points
    :param start: The start of the interval
    :param end: The end of the interval
    :return: A list of break points
    """
    points = uniform.ppf([float(i) / no_intervals for i in xrange(no_intervals)])
    return points * (end - start) + start
Example #14
    def latin_cube_init(self):
        samples = lhs(self.dim_design_space,
                      samples=self.num_init_samples,
                      criterion='center')
        train_ = np.zeros(samples.shape)

        for i in range(self.dim_design_space):
            loc_ = self.bounds[i, 0]
            scale_ = self.bounds[i, 1] - self.bounds[i, 0]
            train_[:, i] = uniform.ppf(samples[:, i], loc=loc_, scale=scale_)

        self.design_parameter += train_.tolist()
        self.train_features = np.vstack((self.train_features, train_))

        parent_idx = -1
        for idx in range(self.num_init_samples):
            num_success_, grasp_quality_, mass_ = self.do_experiment(idx)
            self.num_trials += [self.iter_per_object]
            self.num_success += [num_success_]
            self.grasp_quality += [grasp_quality_]
            self.mass += [mass_]
            print(mass_)
            # TODO : post processing grasp_quality values
            new_label = self.post_processing_per_design(idx)
            print(new_label)
            parent_idx = self.process_bb.add_observation(
                new_label[0], new_label[1], np.average(num_success_))

        while parent_idx != -1:
            num_success_, grasp_quality_, _ = self.do_experiment(parent_idx)
            # update data
            self.num_success[parent_idx] += num_success_
            for obj_idx in range(self.num_objects):
                self.grasp_quality[parent_idx][obj_idx] = np.hstack(
                    (self.grasp_quality[parent_idx][obj_idx],
                     grasp_quality_[obj_idx]))

            self.num_trials[parent_idx] += self.iter_per_object
            new_label = self.post_processing_per_design(parent_idx)
            parent_idx = self.process_bb.update_observation(
                new_label[0],
                new_label[1],
                np.average(self.num_success[parent_idx]),
                parent_idx=parent_idx)
        """update gaussian process fitting & predict & new candidates"""
        self.train_labels = (self.process_bb.upper_bounds +
                             self.process_bb.lower_bounds) * 0.5
        noise = (
            (self.process_bb.upper_bounds - self.process_bb.lower_bounds) *
            0.5 / self.gamma)
        for obj in range(self.num_objectives):
            if obj == 1:
                self.gp[obj].alpha = noise[:, obj]
            self.gp[obj].fit(self.train_features, self.train_labels[:, obj])
Example #15
def Dist(stvars, value, inpt):
    v = zeros(inpt)
    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':        
            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            v[j] = uniform.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    return v
Example #16
def inverse_local(local_prob, hyper):
	n_group = len(local_prob) // 2  # integer division so the slices below stay valid

	# Rad
	rad_th = uniform.ppf(local_prob[:n_group], 0.1, 10.)	

	# Mass
	gamma, c, sm = hyper[0], hyper[1], hyper[2]
	mu = c * rad_th ** gamma
	mass_th = norm.ppf(local_prob[n_group:], mu, sm)

	local = np.hstack((rad_th, mass_th))

	return local
Example #17
def inverse_local(local_prob, hyper):
	# R(0,i) for fix mass
	prob_R0 = local_prob[0:n_fixm]
	R0 = piece_linear(hyper, M0, prob_R0)

	# M(1,i) for variable mass
	prob_M1 = local_prob[n_fixm:n_fixm+n_varm]
	M1 = uniform.ppf(prob_M1, -4., 10.)

	# R(1,i) for variable mass
	prob_R1 = local_prob[n_fixm+n_varm:]
	R1 = piece_linear(hyper, M1, prob_R1)

	local = np.hstack((R0, M1, R1))

	return local
Example #18
    def icdf(self, x):
        '''
        Evaluate the inverse cumulative distribution function (icdf) of the
        uniform random variable at points x.

        Parameters
        ----------
        x : list or numpy.ndarray
            The points at which the icdf is to be evaluated.

        Returns
        -------
        numpy.ndarray
            The evaluations of the icdf.

        '''
        return uniform.ppf(x, self.inf, self.sup - self.inf)
Example #19
    def generateLatinHypercubeSampledMultipliers(self, specification_map, number_samples) :
            
        # Construct sets of random sampled multipliers from the selected distribution for each parameter
        multiplier_sets = {}
        for key, specification in specification_map.items() :

            # Generate stratified random probability values for distribution generation via inverse CDF
            stratified_random_probabilities = ((np.array(range(number_samples)) + np.random.random(number_samples))/number_samples)

            # Use stratified random probability values to generate stratified samples from selected distribution via inverse CDF
            distribution = specification['distribution']
            if distribution == 'uniform' :
                lower = specification['settings']['lower']
                base = specification['settings']['upper'] - lower
                multiplier_sets[key] = uniform.ppf(stratified_random_probabilities, loc=lower, scale=base).tolist()
            elif distribution == 'normal' :
                mean = specification['settings']['mean']
                std_dev = specification['settings']['std_dev']
                multiplier_sets[key] = norm.ppf(stratified_random_probabilities, loc=mean, scale=std_dev).tolist()
            elif distribution == 'triangular' :
                a = specification['settings']['a']
                base = specification['settings']['b'] - a
                c_std = (specification['settings']['c'] - a)/base
                multiplier_sets[key] = triang.ppf(stratified_random_probabilities, c_std, loc=a, scale=base).tolist()
            elif distribution == 'lognormal' :
                lower = specification['settings']['lower']
                scale = specification['settings']['scale']
                sigma = specification['settings']['sigma']
                multiplier_sets[key] = lognorm.ppf(stratified_random_probabilities, sigma, loc=lower, scale=scale).tolist()
            elif distribution == 'beta' :
                lower = specification['settings']['lower']
                base = specification['settings']['upper'] - lower
                a = specification['settings']['alpha']
                b = specification['settings']['beta']
                multiplier_sets[key] = beta.ppf(stratified_random_probabilities, a, b, loc=lower, scale=base).tolist()

        # Randomly select from sampled multiplier sets without replacement to form multipliers (dictionaries)
        sampled_multipliers = []
        for i in range(number_samples) :
            sampled_multiplier = {}
            for key, multiplier_set in multiplier_sets.items() :
                random_index = np.random.randint(len(multiplier_set))
                sampled_multiplier[key] = multiplier_set.pop(random_index)
            sampled_multipliers.append(sampled_multiplier)

        return sampled_multipliers
Example #20
def Dist(stvars, value, inpt):
    v = zeros(inpt)
    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                            stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0,
                               exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                            stvars[j].param[1], stvars[j].param[2],
                            stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            v[j] = uniform.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1])

    return v
Example #21
def uniform_break_points(no_intervals, start, end):
    """Uniformly distributed break points between start and end.
    The break points will be placed at equal distance but starting at start and
    ending before end.

    :param no_intervals: Number of intervals desired.
    :type no_intervals: int

    :param start: Start of the interval. This will also be the first break point.
    :type start: float

    :param end: End point of the interval. This is _not_ included as a break point.
    :type end: float

    :returns: a list of no_intervals break points
    :rtype: list
    """
    points = uniform.ppf(
        [float(i) / no_intervals for i in xrange(no_intervals)])
    return points * (end - start) + start
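
Assuming the function above is in scope: the ppf of the standard uniform is the identity on [0, 1], so the call simply rescales the grid 0, 1/n, ..., (n-1)/n onto [start, end).

print(uniform_break_points(4, 0.0, 10.0))  # -> [0.0, 2.5, 5.0, 7.5]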
Example #22
    def inverse_transform_sampling(self, uni_samples):
        """
        Creates samples using inverse probability integral transformation.

        Parameters
        ----------
        uni_samples: float ndarray
            An array of floating point values in the [0, 1] domain.
        """
        if self.distribution == 'normal':
            self.samples = norm.ppf(uni_samples,
                                    loc=self.theta[0],
                                    scale=self.theta[1])

        elif self.distribution == 'lognormal':
            self.samples = np.exp(
                norm.ppf(uni_samples,
                         loc=np.log(self.theta[0]),
                         scale=self.theta[1]))
        elif self.distribution == 'uniform':
            self.samples = uniform.ppf(uni_samples,
                                       loc=self.theta[0],
                                       scale=self.theta[1] - self.theta[0])
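
The uniform branch is the textbook probability integral transform: if U ~ Uniform(0, 1), then F^{-1}(U) is distributed according to F. A quick standalone check (the theta values are placeholders):

import numpy as np
from scipy.stats import uniform

theta = (2.0, 5.0)  # placeholder (lower, upper) bounds
u = np.random.default_rng(1).random(100000)
x = uniform.ppf(u, loc=theta[0], scale=theta[1] - theta[0])
assert theta[0] <= x.min() <= x.max() <= theta[1]
print(x.mean())  # close to 3.5, the midpoint of [2, 5]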
Example #23
def SA_FAST(driver):
    
    # First order indices for a given model, computed with the Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients.
    # I-III Theory/Applications/Analysis The Journal of Chemical Physics
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'
    
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------
    
    #
    MI = 4#: maximum number of fourier coefficients that may be retained in
    # calculating the partial variances without interferences between the assigned frequencies
    #
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt-1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1,N+1) - N-1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):    
        if stvars[j].dist == 'NORM':
            X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])        
        elif stvars[j].dist == 'LNORM':        
            X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))        
    if krig == 1:            
        load("dmodel")            
        Y = predictor(X, dmodel)            
    else:
        values = []            
        for p in range(N):
#            print 'Running simulation on test',p+1,'of',N
#            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))# initially zero
    BC = zeros((N, otpt))# initially zero
#    q = int(N / 2)-1
    q = (N-1)/2
    for j in range(2,N+1,2):    # j is even
#        print "Y[q]",Y[q]
#        print "matrix(cos(pi * j * arange(1,q+) / N))",matrix(cos(pi * j * arange(1,q+1) / N))
#        print "matrix(Y[q + arange(0,q)] + Y[q - arange(0,q)])",matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)])
        AC[j-1] = 1.0 / N * matrix(Y[q] + matrix(cos(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)]))
    for j in range(1,N+1,2):    # j is odd
        BC[j-1] = 1.0 / N * matrix(sin(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] - Y[q - arange(1,q+1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt,otpt,inpt));
    for i in range(inpt):    
        Vi = zeros((otpt, otpt))    
        for j in range(1,MI+1): 
            idx = j * OM[i]-1     
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------
    
    Sti = []# appears right after the call to this method in the original PCC_Computation.m
    
#    if plotf == 1:    
#        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):        
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #24
expected_demand = 200

# triang
a = 100
b = 250
c = 200

# PART 2

from scipy.stats import norm

cs = price - cost
ce = cost - g

cr = cs / (cs + ce)

k = norm.ppf(cr)

Q2 = 220 + k * 30
print(Q2)

# PART 3
Q3 = cr * 300
print(Q3)

# PART 4
loc = 0
scale = 300
from scipy.stats import uniform
Q4 = uniform.ppf(0.20, loc=loc, scale=scale)
print(Q4)
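
Sanity check for PART 4: the 20th percentile of Uniform(0, 300) sits 20% of the way through the interval.

from scipy.stats import uniform
print(uniform.ppf(0.20, loc=0, scale=300))  # ~60.0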
Example #25
 def ppf(self, quantiles):
     x = uniform.ppf(quantiles, loc=self.a, scale=(self.b-self.a))
     return x
Example #26
def SA_EFAST(driver):

    #[SI,STI] = EFAST(K,WANTEDN)
    # First order and total effect indices for a given model computed with
    # Extended Fourier Amplitude Sensitivity Test (EFAST).
    # Andrea Saltelli, Stefano Tarantola and Karen Chan. 1999
    # A quantitative model-independent method for global sensitivity analysis of model output.
    # Technometrics 41:39-56
    #
    # Input:
    # inpt : no. of input factors
    # WANTEDN : wanted no. of sample points
    #
    # Output:
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of inpt frequencies
    # OMI : frequency for the group of interest
    # OMCI[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output
    # N : no. of runs on each curve

    # ----------------------  Setup  ---------------------------
    methd = 'EFAST'
    method = '10'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------

    NR = 1#: no. of search curves
    MI = 4#: maximum number of fourier coefficients that may be retained in calculating
    # the partial variances without interferences between the assigned frequencies
    #
    # Computation of the frequency for the group of interest OMi and the no. of sample points N.
    OMi = int(floor((nEFAST / NR - 1) / (2 * MI) / inpt))
    N = 2 * MI * OMi + 1
    total_sims = N*NR*inpt
    sim = 0
    if (N * NR < 65):
        logging.error('sample size must be >= 65 per factor.')
        raise ValueError,'sample size must be >= 65 per factor.'

    # Algorithm for selecting the set of frequencies. OMci(i), i=1:inpt-1, contains
    # the set of frequencies to be used by the complementary group.
    OMci = SETFREQ(N - 1, OMi / 2 / MI)
    # Loop over the inpt input factors.
    Si = zeros((otpt,otpt,inpt));
    Sti = zeros((otpt,otpt,inpt));
    for i in range(inpt):
        # Initialize AV,AVi,AVci to zero.
        AV = 0
        AVi = 0
        AVci = 0
        # Loop over the NR search curves.
        for L in range(NR):
            # Setting the vector of frequencies OM for the inpt factors.
            cj = 1
            OM = zeros(inpt)
            for j in range(inpt):
                if (j == i):
                    # For the factor of interest.
                    OM[i] = OMi
                else:
                    # For the complementary group.
                    OM[j] = OMci[cj]
                    cj = cj + 1
            # Setting the relation between the scalar variable S and the coordinates
            # {X(1),X(2),...X(inpt)} of each sample point.
            FI = zeros(inpt)
            for j in range(inpt):
                FI[j] = random.random() * 2 * pi        # random phase shift
            S_VEC = pi * (2 * arange(1,N+1) - N - 1) / N
            OM_VEC = OM[range(inpt)]
            FI_MAT = transpose(array([FI]*N))
            ANGLE = matrix(OM_VEC).T*matrix(S_VEC) + matrix(FI_MAT)
            X = 0.5 + arcsin(sin(ANGLE.T)) / pi
            # Transform distributions from standard uniform to general.

            for j in range(inpt):
                if stvars[j].dist == 'NORM':
                    X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
                elif stvars[j].dist == 'LNORM':
                    X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
                elif stvars[j].dist == 'BETA':
                    X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
                elif stvars[j].dist == 'UNIF':
                    X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

            # Do the N model evaluations.
            Y = zeros((N, otpt))
            if krig == 1:
                load("dmodel")
                Y = predictor(X, dmodel)
            else:
                values = []
                for p in range(N):
#                    sim += 1
#                    print 'Running simulation on test',sim,'of',total_sims
#                    Y[p] = run_model(driver, array(X[p])[0])
                    values.append(array(X[p])[0])
                Y = run_list(driver, values)

            # Subtract the average value.
            Y = Y - kron(mean(Y,0), ones((N, 1)))

            # Fourier coeff. at [1:OMi/2].
            NQ = int(N / 2)-1
            N0 = NQ + 1
            COMPL = 0
            Y_VECP = Y[N0+1:] + Y[NQ::-1]
            Y_VECM = Y[N0+1:] - Y[NQ::-1]
#            AC = zeros((int(ceil(OMi / 2)), otpt))
#            BC = zeros((int(ceil(OMi / 2)), otpt))
            AC = zeros((OMi * MI, otpt))
            BC = zeros((OMi * MI, otpt))
            for j in range(int(ceil(OMi / 2))+1):
                ANGLE = (j+1) * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j] = (Y[N0] +matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j]).T * matrix(AC[j]) + matrix(BC[j]).T * matrix(BC[j])
            # Computation of V_{(ci)}.
            Vci = 2 * COMPL
            AVci = AVci + Vci
            # Fourier coeff. at [P*OMi, for P=1:MI].
            COMPL = 0
# Do these need to be recomputed at all?
#            Y_VECP = Y[N0 + range(NQ)] + Y[N0 - range(NQ)]
#            Y_VECM = Y[N0 + range(NQ)] - Y[N0 - range(NQ)]
            for j in range(OMi, OMi * MI + 1, OMi):
                ANGLE = j * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j-1] = (Y[N0] + matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j-1] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j-1]).T * matrix(AC[j-1]) + matrix(BC[j-1]).T * matrix(BC[j-1])
            # Computation of V_i.
            Vi = 2 * COMPL
            AVi = AVi + Vi
            # Computation of the total variance in the time domain.
            AV = AV +  matrix(Y).T * matrix(Y) / N
        # Computation of sensitivity indicies.
        AV = AV / NR
        AVi = AVi / NR
        AVci = AVci / NR
        Si[:, :, i] = AVi / AV
        Sti[:, :, i] = 1 - AVci / AV

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

#    if plotf == 1:
#        piecharts(inpt, otpt, Si, Sti, methd, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T
    if simple == 1:
        Sti_t = zeros((inpt,otpt))
        for p in range(inpt):
            Sti_t[p] = diag(Sti[:, :, p])
        Sti = Sti_t.T
    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #27
File: td1.py Project: linkzl/insa
# 'lw' = 'linewidth'


# ============================================= #
# ============ CONTINUOUS UNIFORM ============= #
# ============================================= #

fig, ax = plt.subplots(1, 1)

#### Compute various summary statistics:
# mean, variance, skew & kurtosis
mean, var, skew, kurt = uniform.stats(moments='mvsk')

# Create a set of evenly spaced values
x = np.linspace(uniform.ppf(0.01), uniform.ppf(0.99), 100)

ax.plot(x, uniform.pdf(x),'r-', lw=5, alpha=0.6, label='uniform pdf')


# Calling the distribution returns a 'frozen' version of the PDF
rv = uniform()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

vals = uniform.ppf([0.001, 0.5, 0.999])

# Returns True if the elements of the two vectors are equal (within a tolerance)
np.allclose([0.001, 0.5, 0.999], uniform.cdf(vals))


# Returns random variates
Example #28
 def _get_theta(self):
     # Nf random numbers from the interval (0,1)
     rn = random.rand(self.Nf)   
     theta_rand = uniform.ppf(rn, self.theta_loc, scale = self.theta_scale)
     return theta_rand
Example #29
def UP_MCS(problem, driver):
    # Uses the MCS method for UP

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:, j] = norm.ppf(uniform.cdf(value[:, j], 0, 1),
                                   stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:, j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[1], 0,
                                      exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:, j] = beta.ppf(uniform.cdf(value[:, j], 0,
                                               1), stvars[j].param[0],
                                   stvars[j].param[1], stvars[j].param[2],
                                   stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:, j] = uniform.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
        #        for i in range(nMCS):
        #            print 'Running simulation', i+1, 'of', nMCS, 'with inputs', value[i]
        #            out[i] = run_model(driver, value[i])
        out = run_list(problem, driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0],
                             ones(nMCS))).reshape(otpt, nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1],
                             ones(nMCS))).reshape(otpt, nMCS).transpose()
    B = logical_and(greater_equal(out, limstate1), less_equal(out, limstate2))
    PCC = sum(B, 0) / nMCS
    B_t = B[sum(B, 1) == otpt]
    if otpt > 1 and not 0 in PCC[0:otpt]:
        PCC = append(PCC, len(B_t) / nMCS)

    #Moments
    CovarianceMatrix = matrix(cov(out, None, 0))  #.transpose()
    Moments = {
        'Mean': mean(out, 0),
        'Variance': diag(CovarianceMatrix),
        'Skewness': skew(out),
        'Kurtosis': kurtosis(out, fisher=False)
    }

    # combine the display of the correlation matrix with setting a var that will be needed below
    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    C_Y = [0] * otpt
    for k in range(0, otpt):
        if Moments['Variance'][k] != 0:
            C_Y[k] = estimate_complexity.with_samples(out[:, k], nMCS)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(
        invalid='ignore'
    )  # ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'PCC': PCC
    }

    return Results
Example #30
 def tune_scale(self, u: float, y: float):
     self.scale = abs(u - y) / uniform.ppf(q=((self.p + 1) / 2))
Example #31
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 12:52:16 2020

@author: guillem
"""

# UNIFORM DISTRIBUTION

from scipy.stats import uniform

uniform.pdf(0.5, loc=-1, scale=2)

uniform.ppf(0.5, loc=-1, scale=2)

uniform.rvs(size=30, loc=-1, scale=2)

# EXPONENTIAL DISTRIBUTION

from scipy.stats import expon
expon.pdf(0.0001, scale=1. / 3)

expon.cdf(0.5, scale=1. / 3)

expon.rvs(scale=1. / 3, size=10)

# NORMAL DISTRIBUTION

from scipy.stats import norm
Example #32
def inverse_hyper(hyper_prob):
	pr_a, pr_b, pr_c = hyper_prob
	a, b, c = uniform.ppf([pr_a, pr_b, pr_c], 0., 10.)
	
	hyper = np.array([a, b, c])
	return hyper
Example #33
def SA_FAST(problem, driver):

    # First order indices for a given model, computed with the Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients.
    # I-III Theory/Applications/Analysis The Journal of Chemical Physics
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    # ----------------------  Model  ---------------------------

    #
    MI = 4  #: maximum number of fourier coefficients that may be retained in
    # calculating the partial variances without interferences between the assigned frequencies
    #
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt - 1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1, N + 1) - N - 1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            X[:, j] = norm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            X[:, j] = lognorm.ppf(uniform.cdf(X[:, j], 0,
                                              1), stvars[j].param[1], 0,
                                  exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            X[:, j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1], stvars[j].param[2],
                               stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            X[:, j] = uniform.ppf(uniform.cdf(X[:, j], 0, 1),
                                  stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))
    if krig == 1:
        load("dmodel")
        Y = predictor(X, dmodel)
    else:
        values = []
        for p in range(N):
            #            print 'Running simulation on test',p+1,'of',N
            #            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(problem, driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))  # initially zero
    BC = zeros((N, otpt))  # initially zero
    #    q = int(N / 2)-1
    q = (N - 1) / 2
    for j in range(2, N + 1, 2):  # j is even
        #        print "Y[q]",Y[q]
        #        print "matrix(cos(pi * j * arange(1,q+) / N))",matrix(cos(pi * j * arange(1,q+1) / N))
        #        print "matrix(Y[q + arange(0,q)] + Y[q - arange(0,q)])",matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)])
        AC[j - 1] = 1.0 / N * matrix(
            Y[q] + matrix(cos(pi * j * arange(1, q + 1) / N)) *
            matrix(Y[q + arange(1, q + 1)] + Y[q - arange(1, q + 1)]))
    for j in range(1, N + 1, 2):  # j is odd
        BC[j - 1] = 1.0 / N * matrix(sin(
            pi * j * arange(1, q + 1) / N)) * matrix(Y[q + arange(1, q + 1)] -
                                                     Y[q - arange(1, q + 1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt, otpt, inpt))
    for i in range(inpt):
        Vi = zeros((otpt, otpt))
        for j in range(1, MI + 1):
            idx = j * OM[i] - 1
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

    Sti = [
    ]  # appears right after the call to this method in the original PCC_Computation.m

    #    if plotf == 1:
    #        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt, otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #34
print("numpy or scipy?")
selct = input()

# Generate uniform random numbers with numpy
if selct == "numpy":
    #plt.rcParams['figure.figsize'] = (10, 10)
    np.random.seed()
    N = 10000
    x = np.random.uniform(0.0, 1.0, N)
    nbins = 50
    plt.hist(x, nbins, density=True, label="frozen pdf")  # density replaces the removed 'normed' kwarg
    plt.show()

# Generate uniform random numbers with scipy.stats
if selct == "scipy":
    # Random sampling from a uniform probability distribution
    np.random.seed()
    N = 10000
    # Random variable following the uniform distribution on [0.0, 1.0]
    rv = uniform(loc=0.0, scale=1.0)
    # Sample from the uniform distribution
    x = rv.rvs(size=N)
    nbins = 50
    plt.hist(x, nbins, density=True, label='frozen pdf')

    # Plot the true PDF
    x = np.linspace(uniform.ppf(0.01), uniform.ppf(0.99), 100)
    plt.plot(x, uniform.pdf(x), 'r-', lw=4, label='uniform pdf')  # lw is the line width
    plt.legend()
    plt.show()
Example #35
          fontsize=16)
plt.xticks(np.arange(-1, 5, 1))
plt.yticks(np.arange(0, 1.1, 0.1))
plt.legend(loc='upper left', shadow=True)

plt.show()

# ### Uniform (Continuous) Distribution

# In[8]:

#Uniform (Continuous) Distribution
from scipy.stats import uniform

loc, scale = 1, 10
x = np.linspace(uniform.ppf(0.01, loc, scale), uniform.ppf(0.99, loc, scale),
                100)  #Percent Point Function (inverse of cdf — percentiles)

print("Mean              : ", uniform.stats(loc, scale, moments='m'))
print("Variance          : ", uniform.stats(loc, scale, moments='v'))
print("Prob. Dens. Func. : ", uniform.pdf(x, loc, scale))
print("Cum. Density Func.: ", uniform.cdf(x, loc, scale))

CDF = uniform.cdf(x, loc, scale)

fig = plt.figure(figsize=(20, 10))
plt.subplot(221)
plt.plot(x, uniform.pdf(x, loc, scale), 'g', ms=8, label='PDF')
plt.vlines(loc, 0, 0.1, colors='g', lw=5, alpha=0.5, linestyle='dashed')
plt.vlines(scale + 1, 0, 0.1, colors='g', lw=5, alpha=0.5, linestyle='dashed')
plt.xlabel("Sample Space of Continuous Uniform Distribution", fontsize=14)
Example #36
def UP_MCS(driver):
    # Uses the MCS method for UP

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
#        for i in range(nMCS):
#            print 'Running simulation',i+1,'of',nMCS,'with inputs',value[i]
#            out[i] = run_model(driver, value[i])
        out = run_list(driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0], ones(nMCS))).reshape(otpt,nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1], ones(nMCS))).reshape(otpt,nMCS).transpose()
    B = logical_and(greater_equal(out,limstate1),less_equal(out,limstate2))
    PCC = sum(B,0) / nMCS
    B_t = B[sum(B,1) == otpt]
    if otpt > 1 and not 0 in PCC[0:otpt]:
        PCC = append(PCC,len(B_t) / nMCS)

    #Moments
    CovarianceMatrix = matrix(cov(out,None,0))#.transpose()
    Moments = {'Mean': mean(out,0), 'Variance': diag(CovarianceMatrix), 'Skewness': skew(out), 'Kurtosis': kurtosis(out,fisher=False)}

    # combine the display of the correlation matrix with setting a var that will be needed below
    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    C_Y = [0]*otpt
    for k in range(0,otpt):
        if Moments['Variance'][k]!=0:
            C_Y[k] = estimate_complexity.with_samples(out[:,k],nMCS)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)
			
    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'PCC': PCC}

    return Results
Example #37
 def compute_inv_cdf(self, x_grid):
     """
     Returns np array of inverse uniform CDF values at pts in x_grid
     """
     return scipy_uniform.ppf(x_grid, self._minimum_value, self._range_size)
Example #38
    def inverse_transform(self, values):
        """
        Uses inverse probability integral transformation on the provided values.
        """
        result = None

        if self.distribution == 'normal':
            mu, sig = self.theta

            if self.truncation_limits is not None:
                a, b = self.truncation_limits

                if a is None:
                    a = -np.inf
                if b is None:
                    b = np.inf

                p_a, p_b = [norm.cdf((lim - mu) / sig) for lim in [a, b]]

                if p_b - p_a == 0:
                    raise ValueError(
                        "The probability mass within the truncation limits is "
                        "too small and the truncated distribution cannot be "
                        "sampled with sufficiently high accuracy. This is most "
                        "probably due to incorrect truncation limits set for "
                        "the distribution.")

                result = norm.ppf(values * (p_b - p_a) + p_a,
                                  loc=mu,
                                  scale=sig)

            else:
                result = norm.ppf(values, loc=mu, scale=sig)

        elif self.distribution == 'lognormal':
            theta, beta = self.theta

            if self.truncation_limits is not None:
                a, b = self.truncation_limits

                if a is None:
                    a = np.nextafter(0, 1)
                else:
                    a = np.maximum(np.nextafter(0, 1), a)

                if b is None:
                    b = np.inf

                p_a, p_b = [
                    norm.cdf((np.log(lim) - np.log(theta)) / beta)
                    for lim in [a, b]
                ]

                result = np.exp(
                    norm.ppf(values * (p_b - p_a) + p_a,
                             loc=np.log(theta),
                             scale=beta))

            else:
                result = np.exp(norm.ppf(values, loc=np.log(theta),
                                         scale=beta))

        elif self.distribution == 'uniform':
            a, b = self.theta

            if a is None:
                a = -np.inf
            if b is None:
                b = np.inf

            if self.truncation_limits is not None:
                a, b = self.truncation_limits

            result = uniform.ppf(values, loc=a, scale=b - a)

        elif self.distribution == 'empirical':

            s_ids = (values * len(self._raw_samples)).astype(int)
            result = self._raw_samples[s_ids]

        elif self.distribution == 'coupled_empirical':

            raw_sample_count = len(self._raw_samples)
            new_sample_count = len(values)
            new_samples = np.tile(self._raw_samples,
                                  int(new_sample_count / raw_sample_count) + 1)
            result = new_samples[:new_sample_count]

        elif self.distribution == 'multinomial':

            p_cum = np.cumsum(self.theta)[:-1]

            samples = values

            for i, p_i in enumerate(p_cum):
                samples[samples < p_i] = 10 + i
            samples[samples <= 1.0] = 10 + len(p_cum)

            result = samples - 10

        return result
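
The truncated-normal branch rescales the uniform input into the CDF mass between the limits before applying norm.ppf. A standalone sketch of that rescaling (mu, sig and the limits are placeholders):

import numpy as np
from scipy.stats import norm

mu, sig = 0.0, 1.0  # placeholder parameters
a, b = -1.0, 2.0    # placeholder truncation limits
p_a, p_b = norm.cdf((np.array([a, b]) - mu) / sig)
u = np.random.default_rng(2).random(100000)
x = norm.ppf(u * (p_b - p_a) + p_a, loc=mu, scale=sig)
print(x.min(), x.max())  # both inside (a, b): every sample respects the limits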
Example #39
def SA_SOBOL(problem, driver):
    # Uses the Sobol method for SA.
    # Input:
    # inpt : no. of input factors
    # N: number of Sobol samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2 * inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:, j] = norm.ppf(uniform.cdf(value[:, j], 0, 1),
                                   stvars[j].param[0], stvars[j].param[1])
            value[:,
                  j + inpt] = norm.ppf(uniform.cdf(value[:, j + inpt], 0, 1),
                                       stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:, j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[1], 0,
                                      exp(stvars[j].param[0]))
            value[:, j + inpt] = lognorm.ppf(
                uniform.cdf(value[:, j + inpt], 0, 1), stvars[j].param[1], 0,
                exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:, j] = beta.ppf(uniform.cdf(value[:, j], 0,
                                               1), stvars[j].param[0],
                                   stvars[j].param[1], stvars[j].param[2],
                                   stvars[j].param[3] - stvars[j].param[2])
            value[:,
                  j + inpt] = beta.ppf(uniform.cdf(value[:, j + inpt], 0,
                                                   1), stvars[j].param[0],
                                       stvars[j].param[1], stvars[j].param[2],
                                       stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:, j] = uniform.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[0], stvars[j].param[1])
            value[:, j + inpt] = uniform.ppf(
                uniform.cdf(value[:, j + inpt], 0, 1), stvars[j].param[0],
                stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        dmodel = load("dmodel")  # assumed: load() returns the saved kriging model
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(problem, driver, values))
        for i in range(nSOBOL):
            YXMA[i] = next(out)
        for i in range(nSOBOL):
            YXMB[i] = next(out)
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = next(out)

    f0 = mean(YXMA, 0)
    if otpt == 1:
        V = cov(YXMA, None, 0, 1)
    else:  #multiple outputs
        V = diag(cov(YXMA, None, 0, 1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,
               i] = 1.0 / nSOBOL * sum(YXMA[:, p] * YXMC[i, :, p]) - f0[p]**2
            Vci[p,
                i] = 1.0 / nSOBOL * sum(YXMB[:, p] * YXMC[i, :, p]) - f0[p]**2

    Si = zeros((otpt, inpt))
    Sti = zeros((otpt, inpt))
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
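For reference, a toy pick-and-freeze sketch of the same first-order and total-effect estimators on an assumed linear test function, using only NumPy; nothing here comes from the driver machinery above:

import numpy as np

rng = np.random.default_rng(0)
n, d = 100000, 3
coef = np.array([1.0, 2.0, 3.0])

def f(x):
    return x @ coef                     # toy additive model

A = rng.uniform(size=(n, d))            # two independent sample matrices
B = rng.uniform(size=(n, d))
yA, yB = f(A), f(B)
f0, V = yA.mean(), yA.var()

Si, Sti = np.empty(d), np.empty(d)
for i in range(d):
    C = B.copy()
    C[:, i] = A[:, i]                   # freeze column i from A
    yC = f(C)
    Si[i] = (np.mean(yA * yC) - f0**2) / V       # first-order index
    Sti[i] = 1 - (np.mean(yB * yC) - f0**2) / V  # total-effect index

# for an additive model both should approach coef**2 / sum(coef**2)
print(Si.round(2), Sti.round(2))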
Example #40
def _unconditional_samples(
    n_draws,
    n_params,
    dist_type,
    loc,
    scale,
    sampling_scheme,
    seed=0,
    skip=0,
):
    """Generate two independent groups of sample points.

    Parameters
    ----------
    n_draws : int
        Number of Monte Carlo draws.
    n_params : int
        Number of parameters of objective function.
    dist_type : str
        The distribution type of input. Options are "Normal", "Exponential" and "Uniform".
    loc : float or np.ndarray
        The location (`loc`) keyword passed to the `scipy.stats.norm`_ function to
        shift the location of the "standardized" distribution.
    scale : float or np.ndarray
        The `scale` keyword passed to the `scipy.stats.norm`_ function to adjust
        the scale of the "standardized" distribution.
    sampling_scheme : str, optional
        One of ["sobol", "random"]
    seed : int, optional
        Random number generator seed. Default is 0.
    skip : int, optional
        Number of values to skip of Sobol sequence. Default is `0`.

    Returns
    -------
    x, x_prime : np.ndarray
        Two arrays of shape (n_draws, n_params) with i.i.d. draws from a joint distribution.
    """
    # Generate uniformly distributed samples
    np.random.seed(seed)
    if sampling_scheme == "sobol":
        u = cp.generate_samples(
            order=n_draws + skip,
            domain=2 * n_params,
            rule="S",
        ).T
    elif sampling_scheme == "random":
        u = np.random.uniform(size=(n_draws, 2 * n_params))
    else:
        raise ValueError("Argument 'sampling_scheme' is not in {'sobol', 'random'}.")

    # burn-in applies only to the Sobol sequence
    skip = skip if sampling_scheme == "sobol" else 0

    # split the columns into two independent blocks of n_params each
    u_1 = u[skip:, :n_params]
    u_2 = u[skip:, n_params:]

    # Transform uniform draws into draws from the joint distribution
    if dist_type == "Normal":
        z = norm.ppf(u_1)
        z_prime = norm.ppf(u_2)
        cholesky = np.linalg.cholesky(scale)
        x = loc + cholesky.dot(z.T).T
        x_prime = loc + cholesky.dot(z_prime.T).T
    elif dist_type == "Exponential":
        x = expon.ppf(u_1, loc, scale)
        x_prime = expon.ppf(u_2, loc, scale)
    elif dist_type == "Uniform":
        x = uniform.ppf(u_1, loc, scale)
        x_prime = uniform.ppf(u_2, loc, scale)
    else:
        raise NotImplementedError

    return x, x_prime
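A hedged usage sketch, assuming numpy (np), scipy.stats (norm, expon, uniform), and chaospy (cp) are imported as in the function body:

# two independent (1000, 2) blocks drawn uniformly on [0, 1]^2
x, x_prime = _unconditional_samples(
    n_draws=1000,
    n_params=2,
    dist_type="Uniform",
    loc=0.0,
    scale=1.0,
    sampling_scheme="random",
)
assert x.shape == x_prime.shape == (1000, 2)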
Example #41
def SA_SOBOL(driver):
    # Uses the Sobol method for SA.
    # Input:
    # inpt : no. of input factors
    # N : number of Sobol samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2*inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = norm.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
            value[:,j+inpt] = lognorm.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
            value[:,j+inpt] = beta.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = uniform.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        dmodel = load("dmodel")  # assumed: load() returns the saved kriging model
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(driver, values))
        for i in range(nSOBOL):
            YXMA[i] = next(out)
        for i in range(nSOBOL):
            YXMB[i] = next(out)
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = next(out)

    f0 = mean(YXMA,0)
    if otpt==1:
        V = cov(YXMA,None,0,1)
    else:  #multiple outputs
        V = diag(cov(YXMA,None,0,1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,i] = 1.0/nSOBOL*sum(YXMA[:,p]*YXMC[i,:,p]) - f0[p]**2
            Vci[p,i] = 1.0/nSOBOL*sum(YXMB[:,p]*YXMC[i,:,p]) - f0[p]**2

    Si = zeros((otpt,inpt))
    Sti = zeros((otpt,inpt))
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example #42
 def ppf(self, dist, p):
     return uniform.ppf(p, *self._get_params(dist))
Example #43
def Finv_Uniform(r, a, b):
    return uniform.ppf(r, a, b - a)
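For example, the median of Uniform(2, 5); the wrapper exists because scipy's scale parameter is the width b - a, not the upper bound:

assert Finv_Uniform(0.5, 2, 5) == 3.5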
Example #44
 def ppf(self, quantiles: Vector) -> Vector:
     x = uniform.ppf(quantiles, loc=self.a, scale=(self.b - self.a))
     return x
Example #45
 def compute_inv_CDF(self, x_grid):
     '''
     Returns np array of inverse uniform CDF values at pts in x_grid
     '''
     return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size)
Example #46
ax.set_xlabel('waiting time')
ax.set_ylabel('f(x)')
ax.set_title('exponential pdf')

plt.close('all')

## Uniform distribution ---------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import uniform

# support: loc = 3, scale = 1, i.e. uniform on [3, 4]
a = 3
b = 4 - a
x = np.linspace(uniform.ppf(0.01, loc=a, scale=b),
                uniform.ppf(0.99, loc=a, scale=b), 100)

# visualize
fig, ax = plt.subplots(1, 1)
ax.plot(x, uniform.pdf(x, loc=a, scale=b), 'r-', alpha=0.6, lw=5)
ax.set_xlabel('X')
ax.set_ylabel('f(X)')
ax.set_title('uniform pdf')
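# Not in the original snippet: a quick check that ppf inverts cdf,
# so quantiles of this Uniform(3, 4) land back on the support
q = uniform.ppf([0.25, 0.5, 0.75], loc=a, scale=b)
print(q)                               # [3.25 3.5  3.75]
print(uniform.cdf(q, loc=a, scale=b))  # [0.25 0.5  0.75]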

## Gamma distribution --------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import gamma
Example #47
 def ppf(self, dist, p):
     return uniform.ppf(p, *self._get_params(dist))
Example #48
def inverse_local(local_prob, hyper):
	# M(1,i) for variable mass
	M1 = uniform.ppf(local_prob, -4., 10.)

	return M1
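A quick hedged check of the prior defined above (hyper is unused by the function, so None is passed):

assert inverse_local(0.5, None) == uniform.ppf(0.5, -4., 10.)  # == 1.0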