Example #1
def test_dist_sub(distribution):
    """Test distribution subtraction."""
    dist1_e = cp.E(distribution() - 3.0)
    dist2_e = cp.E(3.0 - distribution())
    base_e = cp.E(distribution()) - 3.0
    np.testing.assert_allclose(dist1_e, -dist2_e, rtol=1e-05, atol=1e-08)
    np.testing.assert_allclose(dist2_e, -base_e, rtol=1e-05, atol=1e-08)
Example #2
def test_dist_add(distribution):
    """Test distribution addition."""
    dist1_e = cp.E(distribution() + 2.0)
    dist2_e = cp.E(2.0 + distribution())
    base_e = cp.E(distribution()) + 2.0
    np.testing.assert_allclose(dist1_e, dist2_e, rtol=1e-05, atol=1e-08)
    np.testing.assert_allclose(dist2_e, base_e, rtol=1e-05, atol=1e-08)
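Examples #1 and #2 receive `distribution` as a pytest fixture that is not shown in the snippets. A minimal sketch of such a fixture, assuming a hand-picked list of chaospy distributions (the real test suite likely parametrizes over many more):

import chaospy as cp
import pytest

# Hypothetical selection of zero-argument distribution constructors.
DISTRIBUTIONS = [cp.Normal, cp.Uniform, cp.Exponential]

@pytest.fixture(params=DISTRIBUTIONS, ids=lambda dist: dist.__name__)
def distribution(request):
    """Provide one distribution constructor per parametrized test run."""
    return request.param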
Example #3
def sens(xdict, funcs):
    dv = xdict['xvars']  # Get the design variable out
    UQObj.QoI.p['oas_example1.wing.twist_cp'] = dv
    obj_func = UQObj.QoI.eval_ObjGradient
    con_func = UQObj.QoI.eval_ConstraintQoIGradient
    funcsSens = {}

    # Objective function
    # Full integration
    g_mu_j = collocation_grad_obj.normal.mean(cp.E(UQObj.jdist),
                                              cp.Std(UQObj.jdist), obj_func)
    g_var_j = collocation_grad_obj.normal.variance(obj_func, UQObj.jdist,
                                                   g_mu_j)
    # # Reduced integration
    # g_mu_j = collocation_grad_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)
    # g_var_j = collocation_grad_obj.normal.reduced_variance(obj_func, UQObj.jdist, UQObj.dominant_space, g_mu_j)

    funcsSens['obj', 'xvars'] = g_mu_j + 2 * np.sqrt(
        g_var_j
    )  # collocation_grad_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)

    # Constraint function
    # Full integration
    funcsSens['con', 'xvars'] = collocation_grad_con.normal.mean(
        cp.E(UQObj.jdist), cp.Std(UQObj.jdist), con_func)
    # # Reduced integration
    # funcsSens['con', 'xvars'] = collocation_grad_con.normal.reduced_mean(con_func, UQObj.jdist, UQObj.dominant_space)
    fail = False
    return funcsSens, fail
Example #4
def objfunc(xdict):
    dv = xdict['xvars']  # Get the design variable out
    UQObj.QoI.p['oas_example1.wing.twist_cp'] = dv
    obj_func = UQObj.QoI.eval_QoI
    con_func = UQObj.QoI.eval_ConstraintQoI
    funcs = {}

    # Objective function

    # Full integration
    mu_j = collocation_obj.normal.mean(cp.E(UQObj.jdist), cp.Std(UQObj.jdist),
                                       obj_func)
    var_j = collocation_obj.normal.variance(obj_func, UQObj.jdist, mu_j)
    # # Reduced integration
    # mu_j = collocation_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)
    # var_j = collocation_obj.normal.reduced_variance(obj_func, UQObj.jdist, UQObj.dominant_space, mu_j)
    funcs['obj'] = mu_j + 2 * np.sqrt(var_j)

    # Constraint function
    # Full integration
    funcs['con'] = collocation_con.normal.mean(cp.E(UQObj.jdist),
                                               cp.Std(UQObj.jdist), con_func)
    # # Reduced integration
    # funcs['con'] = collocation_con.normal.reduced_mean(con_func, UQObj.jdist, UQObj.dominant_space)
    fail = False
    return funcs, fail
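Examples #3 and #4 follow the pyOptSparse callback convention: the objective callback returns a `funcs` dictionary plus a fail flag, and the sensitivity callback returns the matching gradients. A rough sketch of how the two callbacks might be wired into an optimizer; the variable size and bounds are placeholders, not values from the original OpenAeroStruct setup:

import numpy as np
import pyoptsparse

n_twist = 5  # hypothetical length of the twist_cp design vector

opt_prob = pyoptsparse.Optimization("robust twist optimization", objfunc)
opt_prob.addVarGroup("xvars", n_twist, lower=-10.0, upper=10.0,
                     value=np.zeros(n_twist))
opt_prob.addObj("obj")
opt_prob.addConGroup("con", 1, lower=0.0, upper=0.0)  # placeholder constraint size

optimizer = pyoptsparse.SLSQP()            # any installed pyOptSparse optimizer
solution = optimizer(opt_prob, sens=sens)  # `sens` from Example #3 supplies gradients
print(solution)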
Example #5
def test_constant_expected():
    """Test if polynomial constant behave as expected."""
    distribution = chaospy.J(chaospy.Uniform(-1.2, 1.2),
                             chaospy.Uniform(-2.0, 2.0))
    const = chaospy.polynomial(7.)
    assert chaospy.E(const, distribution[0]) == const
    assert chaospy.E(const, distribution) == const
    assert chaospy.Var(const, distribution) == 0.
Example #6
def test_dist_add():

    for name, dist in zip(dist_names, dists):
        dist1_e = cp.E(dist() + 2.0)
        dist2_e = cp.E(2.0 + dist())
        base_e = cp.E(dist()) + 2.0
        np.testing.assert_allclose(dist1_e, dist2_e, rtol=1e-05, atol=1e-08)
        np.testing.assert_allclose(dist2_e, base_e, rtol=1e-05, atol=1e-08)
Example #7
def test_dist_sub():

    for name, dist in zip(dist_names, dists):
        dist1_e = cp.E(dist() - 3.0)
        dist2_e = cp.E(3.0 - dist())
        base_e = cp.E(dist()) - 3.0
        np.testing.assert_allclose(dist1_e, -dist2_e, rtol=1e-05, atol=1e-08)
        np.testing.assert_allclose(dist2_e, -base_e, rtol=1e-05, atol=1e-08)
Example #8
def test_analytical_stieltjes(analytical_distribution):
    """Assert that Analytical Stieltjes produces orthogonality."""
    coeffs, [orth], norms = chaospy.analytical_stieltjes(
        order=4, dist=analytical_distribution)
    assert orth[0] == 1
    assert numpy.allclose(chaospy.E(orth[1:], analytical_distribution), 0)
    covariance = chaospy.E(
        numpoly.outer(orth[1:], orth[1:]), analytical_distribution)
    assert numpy.allclose(numpy.diag(numpy.diag(covariance)), covariance)
    assert numpy.allclose(numpoly.lead_coefficient(orth), 1)
Example #9
def OptCostRS(gain, pdf):

    polynomials = cp.orth_ttr(order=2, dist=pdf)
    samples, weights = cp.generate_quadrature(order=2,
                                              domain=pdf,
                                              rule="Gaussian")
    stateTensor = [OptCost(s, gain) for s in samples.T]
    # stateTensor = pool.map(OptCost,samples.T)
    PCE = cp.fit_quadrature(polynomials, samples, weights, stateTensor)

    print "\nGain = {}".format(gain)
    print "PCE Expectation: {} ".format(cp.E(poly=PCE, dist=pdf))
    return cp.E(poly=PCE, dist=pdf)
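OptCost and the input distribution `pdf` in Example #9 come from the surrounding script and are not shown. The same quadrature-based PCE workflow can be illustrated end to end with a stand-in cost function; this is a self-contained sketch, not the original model:

import chaospy as cp
import numpy as np

pdf = cp.J(cp.Normal(0.0, 1.0), cp.Uniform(-1.0, 1.0))  # stand-in input distribution

def toy_cost(sample, gain=1.0):
    x, y = sample
    return gain * (x**2 + 0.5 * y)  # stand-in for OptCost

polynomials = cp.orth_ttr(order=2, dist=pdf)
samples, weights = cp.generate_quadrature(order=2, domain=pdf, rule="Gaussian")
state = [toy_cost(s) for s in samples.T]
PCE = cp.fit_quadrature(polynomials, samples, weights, state)

print("PCE Expectation: {}".format(cp.E(poly=PCE, dist=pdf)))  # ~1.0 for this toy model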
Example #10
def test_distribution_subtraction(distribution):
    """Test distribution subtraction."""
    right_subtraction = chaospy.E(distribution() - 3.0)
    left_subtraction = chaospy.E(3.0 - distribution())
    reference = chaospy.E(distribution()) - 3.0
    numpy.testing.assert_allclose(right_subtraction,
                                  -left_subtraction,
                                  rtol=1e-05,
                                  atol=1e-08)
    numpy.testing.assert_allclose(left_subtraction,
                                  -reference,
                                  rtol=1e-05,
                                  atol=1e-08)
Example #11
def test_distribution_addition(distribution):
    """Assert adding."""
    right_addition = chaospy.E(distribution() + 2.0)
    left_addition = chaospy.E(2.0 + distribution())
    reference = chaospy.E(distribution()) + 2.0
    numpy.testing.assert_allclose(right_addition,
                                  left_addition,
                                  rtol=1e-05,
                                  atol=1e-08)
    numpy.testing.assert_allclose(left_addition,
                                  reference,
                                  rtol=1e-05,
                                  atol=1e-08)
Example #12
def Kurt(poly, dist=None, fisher=True, **kws):
    """
    Kurtosis operator.

    Element by element 4th order statistics of a distribution or polynomial.

    Args:
        poly (Poly, Dist) : Input to take kurtosis on.
        dist (Dist) : Defines the space the kurtosis is taken on.
                It is ignored if `poly` is a distribution.
        fisher (bool) : If True, Fisher's definition is used (Normal -> 0.0).
                If False, Pearson's definition is used (normal -> 3.0)
        **kws (optional) : Extra keywords passed to dist.mom.

    Returns:
        (ndarray) : Element for element kurtosis along `poly`, where
                `kurtosis.shape == poly.shape`.

    Examples:
        >>> x = cp.variable()
        >>> Z = cp.Uniform()
        >>> print(np.around(cp.Kurt(Z), 8))
        -1.2
        >>> Z = cp.Normal()
        >>> print(np.around(cp.Kurt(x, Z), 8))
        0.0
    """
    if isinstance(poly, cp.dist.Dist):
        x = cp.poly.variable(len(poly))
        poly, dist = x, poly
    else:
        poly = cp.poly.Poly(poly)

    if fisher:
        adjust = 3
    else:
        adjust = 0

    shape = poly.shape
    poly = cp.poly.flatten(poly)

    m1 = cp.E(poly, dist)
    m2 = cp.E(poly**2, dist)
    m3 = cp.E(poly**3, dist)
    m4 = cp.E(poly**4, dist)

    out = (m4-4*m3*m1 + 6*m2*m1**2 - 3*m1**4) /\
            (m2**2-2*m2*m1**2+m1**4) - adjust

    out = np.reshape(out, shape)
    return out
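The moment expansion in Kurt is the fourth central moment written in raw moments, E[(X - m1)**4] = m4 - 4*m3*m1 + 6*m2*m1**2 - 3*m1**4, divided by the squared variance. A quick sanity check against a sampling estimate (a sketch; scipy's kurtosis uses the Fisher convention by default, matching fisher=True above):

import chaospy as cp
from scipy import stats

dist = cp.Gamma(2.0)              # excess kurtosis of Gamma(k) is 6/k, here 3.0
analytic = cp.Kurt(dist)          # closed form via raw moments
empirical = stats.kurtosis(dist.sample(10**6))  # Fisher definition by default
print(analytic, empirical)        # the two values should roughly agree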
Example #13
    def solve_nonlinear(self, params, unknowns, resids):

        power = params['power']
        method_dict = params['method_dict']
        dist = method_dict['distribution']
        rule = method_dict['rule']
        n = len(power)
        if rule != 'rectangle':
            points, weights = cp.generate_quadrature(order=n - 1,
                                                     domain=dist,
                                                     rule=rule)
        # else:
        #     points, weights = quadrature_rules.rectangle(n, method_dict['distribution'])

        poly = cp.orth_chol(n - 1, dist)
        # poly = cp.orth_bert(n-1, dist)
        # double check this is giving me good orthogonal polynomials.
        # print poly, '\n'
        p2 = cp.outer(poly, poly)
        # print 'chol', cp.E(p2, dist)
        norms = np.diagonal(cp.E(p2, dist))
        print('diag', norms)

        expansion, coeff = cp.fit_quadrature(poly,
                                             points,
                                             weights,
                                             power,
                                             retall=True,
                                             norms=norms)
        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)

        mean = cp.E(expansion, dist)
        print('mean cp.E =', mean)
        # mean = sum(power*weights)
        print('mean sum =', sum(power * weights))
        print('mean coeff =', coeff[0])
        std = cp.Std(expansion, dist)

        print(mean)
        print(std)
        print(np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:])))
        # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns['mean'] = mean * hours
        unknowns['std'] = std * hours

        print('In ChaospyStatistics')
Example #14
    def __init__(self, jdist, distribution_type, QoI_dict, data_type=np.float):
        self.data_type = data_type
        self.n_rv = cp.E(jdist).size
        self.collocation_dict = {}  # Dictionary that will hold all the
        # information pertaining to the different QoI
        if distribution_type == "Normal" or distribution_type == "MvNormal":
            self.collocation_dict = copy.copy(QoI_dict)
            for i in self.collocation_dict:
                quadrature_degree = self.collocation_dict[i][
                    'quadrature_degree']
                q, w = np.polynomial.hermite.hermgauss(quadrature_degree)
                if self.collocation_dict[i]['reduced_collocation'] == False:
                    points, weights = self.getQuadratureInfo(
                        jdist, quadrature_degree, q, w)
                else:
                    print('i = ', i)
                    print('dominant_dir = ',
                          self.collocation_dict[i]['dominant_dir'])
                    dominant_dir = self.collocation_dict[i]['dominant_dir']
                    points, weights = self.getReducedQuadratureInfo(
                        jdist, dominant_dir, quadrature_degree, q, w)
                self.collocation_dict[i]['points'] = points
                self.collocation_dict[i]['quadrature_weights'] = weights
                self.allocateQoISpace(self.collocation_dict[i])
        else:
            raise NotImplementedError
Example #15
    def __init__(self,
                 jdist,
                 quadrature_degree,
                 distribution_type,
                 QoI_dict,
                 include_derivs=False,
                 reduced_collocation=False,
                 dominant_dir=None,
                 data_type=np.float):
        assert quadrature_degree > 0, "Need at least 1 collocation point for \
                                        uncertainty propagation"

        self.n_rv = cp.E(jdist).size
        self.QoI_dict = utils.copy_qoi_dict(
            QoI_dict)  # copy.copy(QoI_dict) # We don't
        self.distribution_type = distribution_type
        self.data_type = data_type

        # Get the 1D quadrature points based on the distribution being
        # approximated
        if distribution_type == "Normal" or distribution_type == "MvNormal":
            self.q, self.w = np.polynomial.hermite.hermgauss(quadrature_degree)
            if reduced_collocation == False:
                self.getQuadratureInfo(jdist, quadrature_degree, self.q,
                                       self.w)
            else:
                self.getReducedQuadratureInfo(jdist, dominant_dir,
                                              quadrature_degree, self.q,
                                              self.w)

            self.allocateQoISpace(include_derivs)
        else:
            raise NotImplementedError
Example #16
def _gc_correlation_pairwise(
    distributions,
    rho,
    order=15,
    force_calc=False,
):
    assert len(distributions) == 2

    # Check if this is a special combination
    special_dist_result = _special_dist(distributions)
    if type(special_dist_result) is bool:
        check_success = False
    else:
        f = special_dist_result
        check_success = True

    # If not force_calc and this is a special combination
    if force_calc is False and check_success is True:
        result = rho * f
    else:
        arg_1 = np.prod(cp.E(distributions))
        arg_2 = np.sqrt(np.prod(cp.Var(distributions)))
        arg = rho * arg_2 + arg_1

        kwargs = dict()
        kwargs["distributions"] = distributions
        kwargs["order"] = order
        kwargs["arg"] = arg

        grid = np.linspace(-0.99, 0.99, num=199, endpoint=True)
        v_p_criterion = np.vectorize(partial(_criterion, **kwargs))
        result = grid[np.argmin(v_p_criterion(grid))]

    return result
Example #17
    def reduced_dVariance(self, QoI_func, jdist, dominant_space, mu_j,
                          dQoI_func, dmu_j):
        """
        same as dVariance but in the reduced stochastic space.
        """
        x = cp.E(jdist)
        rv_covariance = cp.Cov(jdist)
        n_quadrature_loops = len(dominant_space.dominant_indices)
        dominant_dir = dominant_space.iso_eigenvecs[:, dominant_space.
                                                    dominant_indices]
        ref_collocation_pts = self.q
        ref_collocation_w = self.w
        idx = 0
        colloc_xi_arr = np.zeros(n_quadrature_loops)
        colloc_w_arr = np.zeros(n_quadrature_loops)
        dvariance_j = np.zeros([self.QoI_dimensions, self.QoI_dimensions])

        idx = self.doReduced_dVariance(x, rv_covariance, dominant_dir, mu_j,
                                       dmu_j, dvariance_j, ref_collocation_pts,
                                       ref_collocation_w, QoI_func, dQoI_func,
                                       colloc_xi_arr, colloc_w_arr, idx)

        assert idx == -1
        dvariance_j[:] = dvariance_j[:] / (np.sqrt(np.pi)**n_quadrature_loops)

        # TODO: figure out a better way of handling arrays and matrices for this routine
        return dvariance_j.diagonal()
Example #18
    def calcMarginals(self, jdist):
        """
        Compute the marginal density object for the dominant space. The current
        implementation is only for Gaussian distribution.
        """

        marginal_size = len(self.dominant_indices)
        orig_mean = cp.E(jdist)
        orig_covariance = cp.Cov(jdist)

        # Step 1: Rotate the mean & covariance matrix of the original joint
        # distribution along the dominant eigenvectors
        dominant_vecs = self.iso_eigenvecs[:, self.dominant_indices]
        marginal_mean = np.dot(dominant_vecs.T, orig_mean)
        marginal_covariance = np.matmul(
            dominant_vecs.T, np.matmul(orig_covariance, dominant_vecs))

        # Step 2: Create the new marginal distribution
        if marginal_size == 1:  # Univariate distributions have to be treated separately
            marginal_std_dev = np.sqrt(np.asscalar(marginal_covariance))
            self.marginal_distribution = cp.Normal(np.asscalar(marginal_mean),
                                                   marginal_std_dev)
        else:
            self.marginal_distribution = cp.MvNormal(marginal_mean,
                                                     marginal_covariance)
Example #19
    def reduced_mean(self, QoI_func, jdist, dominant_space):

        x = cp.E(jdist)
        covariance = cp.Cov(jdist)
        n_quadrature_loops = len(dominant_space.dominant_indices)
        dominant_dir = dominant_space.iso_eigen_vectors[:, dominant_space.
                                                        dominant_indices]
        ref_collocation_pts = self.q
        ref_collocation_w = self.w
        idx = 0
        colloc_xi_arr = np.zeros(n_quadrature_loops)
        colloc_w_arr = np.zeros(n_quadrature_loops)
        mu_j = np.zeros(self.QoI_dimensions)

        idx = self.doReducedUniformMean(x, covariance, dominant_dir, mu_j,
                                        ref_collocation_pts, ref_collocation_w,
                                        QoI_func, colloc_xi_arr, colloc_w_arr,
                                        idx)

        assert idx == -1
        # TODO: The following scaling doesn't look right, INVESTIGATE
        mu_j[:] = mu_j[:] * (0.5**n_quadrature_loops)

        if len(mu_j) == 1:
            return mu_j[0]
        else:
            return mu_j
Example #20
def KG(z, evls, pnts, gp, kernel, NSAMPS=30, DEG=3, sampling=False):

    # Find initial minimum value from GP model
    min_val = 1e100
    X_sample = pnts
    Y_sample = evls
    #for x0 in [np.random.uniform(XL, XU, size=DIM) for oo in range(20)]:
    x0 = np.random.uniform(XL, XU, size=DIM)
    res = mini(gp, x0=x0,
               bounds=[(XL, XU)
                       for ss in range(DIM)])  #, method='Nelder-Mead')
    #res = mini(expected_improvement, x0=x0[0], bounds=[(XL, XU) for ss in range(DIM)], args=(X_sample, Y_sample, gp))#, callback=callb)
    #   if res.fun < min_val:
    min_val = res.fun
    min_x = res.x

    # estimate min(f^{n+1}) with MC simulation
    MEAN = 0
    points = np.atleast_2d(np.append(X_sample, z)).T
    m, s = gp(z, return_std=True)
    distribution = cp.J(cp.Normal(0, s))
    samples = distribution.sample(NSAMPS, rule='Halton')
    PCEevals = []
    for pp in range(NSAMPS):

        # construct future GP, using z as the next point
        evals = np.append(evls, m + samples[pp])
        #evals = np.append(evls, m + np.random.normal(0, s))
        gpnxt = GaussianProcessRegressor(kernel=kernel,
                                         n_restarts_optimizer=35,
                                         random_state=98765,
                                         normalize_y=True)
        gpnxt.fit(points, evals)

        # convenience function
        def gpf_next(x, return_std=False):
            alph, astd = gpnxt.predict(np.atleast_2d(x), return_std=True)
            alph = alph[0]
            if return_std:
                return (alph, astd)
            else:
                return alph

        res = mini(gpf_next, x0=x0, bounds=[(XL, XU) for ss in range(DIM)])
        min_next_val = res.fun
        min_next_x = res.x

        #print('+++++++++ ', res.fun)
        #MEAN += min_next_val
        PCEevals.append(min_next_val)
    if not sampling:
        polynomial_expansion = cp.orth_ttr(DEG, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
    else:
        MEAN = np.mean(PCEevals)
    #print(PCEevals, '...', MEAN)
    #hey
    #MEAN /= NSAMPS
    return min_val - MEAN
Example #21
    def dVariance(self, QoI_func, jdist, mu_j, dQoI_func, dmu_j):
        """
        Computes the partial derivative of the variance
        This implementation is CURRENTLY ONLY for QoI's that return scalar values.
        """
        x = cp.E(jdist)
        rv_covariance = cp.Cov(jdist)
        systemsize = x.size
        ref_collocation_pts = self.q
        ref_collocation_w = self.w
        idx = 0
        colloc_xi_arr = np.zeros(systemsize)
        colloc_w_arr = np.zeros(systemsize)
        dvariance_j = np.zeros([self.QoI_dimensions, self.QoI_dimensions])
        # dvariance_j = np.zeros(self.QoI_dimensions)

        idx = self.do_dVariance(x, rv_covariance, mu_j, dmu_j, dvariance_j,
                                ref_collocation_pts, ref_collocation_w,
                                QoI_func, dQoI_func, colloc_xi_arr,
                                colloc_w_arr, idx)
        assert idx == -1
        dvariance_j[:] = dvariance_j[:] / (np.sqrt(np.pi)**systemsize)

        # TODO: figure out a better way of handling arrays and matrices for this routine
        return dvariance_j.diagonal()
Example #22
def EHI(x,
        gp1,
        gp2,
        xi=0.,
        x2=None,
        MD=None,
        NSAMPS=200,
        PCE=False,
        ORDER=2,
        PAR_RES=100):

    mu1, std1 = gp1(x, return_std=True)
    mu2, std2 = gp2(x, return_std=True)

    a, b, c = parEI(gp1, gp2, x2, '', EI=False, MD=MD, PAR_RES=PAR_RES)
    par = b.T[c, :]
    par += xi
    MEAN = 0  # running sum for observed hypervolume improvement
    if not PCE:  # Monte Carlo Sampling
        for ii in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improved from this point
            if idx[-1]:
                MEAN += H(newPar) - H(par)

        return (MEAN / NSAMPS)
    else:
        # Polynomial Chaos
        # (assumes 2 objective functions)
        distribution = cp.J(cp.Normal(0, std1), cp.Normal(0, std2))

        # sparse grid samples
        samples = distribution.sample(NSAMPS, rule='Halton')
        PCEevals = []
        for pp in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improves
            if idx[-1]:
                PCEevals.append(H(newPar) - H(par))
            else:
                PCEevals.append(0)
        polynomial_expansion = cp.orth_ttr(ORDER, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
        return (MEAN)
Example #23
    def expect(self):
        """Return the expected value of the distribution.

        Returns
        -------
        float
            The expected value of the distribution.
        """
        return float(chaos.E(self.dist))
Example #24
def galerkin_approx(coordinates, joint, expansion_small, norms_small):
    alpha, beta = chaospy.variable(2)

    e_alpha_phi = chaospy.E(alpha*expansion_small, joint)
    initial_condition = e_alpha_phi/norms_small

    phi_phi = chaospy.outer(expansion_small, expansion_small)
    e_beta_phi_phi = chaospy.E(beta*phi_phi, joint)

    def right_hand_side(c, t):
        return -numpy.sum(c*e_beta_phi_phi, -1)/norms_small

    coefficients = odeint(
        func=right_hand_side,
        y0=initial_condition,
        t=coordinates,
    )
    return chaospy.sum(expansion_small*coefficients, -1)
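galerkin_approx assumes a polynomial expansion and its norms built on the joint distribution of the two random model parameters (alpha, the initial condition, and beta, the decay rate in the chaospy intrusive Galerkin tutorial this appears to come from). A hedged sketch of how those inputs might be prepared and the result post-processed; the distributions below are placeholders:

import numpy as np
import chaospy

coordinates = np.linspace(0.0, 10.0, 1000)
joint = chaospy.J(chaospy.Normal(1.5, 0.2),    # alpha: placeholder initial condition
                  chaospy.Uniform(0.1, 0.2))   # beta: placeholder decay rate

expansion_small = chaospy.orth_ttr(3, joint)
norms_small = chaospy.E(expansion_small**2, joint)  # <phi_n, phi_n> under the joint density

model_approx = galerkin_approx(coordinates, joint, expansion_small, norms_small)
print(chaospy.E(model_approx, joint))    # mean trajectory
print(chaospy.Std(model_approx, joint))  # standard deviation trajectory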
Example #25
    def getMCResults4Web(self):
        '''
        Use the surrogate models to estimate the mean and standard deviation
        of the CO2 and brine outputs.
        '''
        if (self.co2Model is None):
            self.getMetaModels()

        meanCO2 = cp.E(self.co2Model, self.jointDist)
        stdCO2 = cp.Std(self.co2Model, self.jointDist)

        meanBrine = cp.E(self.brineModel, self.jointDist)
        stdBrine = cp.Std(self.brineModel, self.jointDist)

        return [
            meanCO2[self.nlw:], stdCO2[self.nlw:], meanBrine[self.nlw:],
            stdBrine[self.nlw:]
        ]
Example #26
def test_orth_ttr():
    dist = cp.Normal(0, 1)
    orth = cp.orth_ttr(5, dist)
    outer = cp.outer(orth, orth)
    Cov1 = cp.E(outer, dist)
    Diatoric = Cov1 - np.diag(np.diag(Cov1))
    assert np.allclose(Diatoric, 0)

    Cov2 = cp.Cov(orth[1:], dist)
    assert np.allclose(Cov1[1:,1:], Cov2)
Example #27
def Skew(poly, dist=None, **kws):
    """
    Skewness operator.

    Element by element 3rd order statistics of a distribution or polynomial.

    Args:
        poly (Poly, Dist) : Input to take skewness on.
        dist (Dist) : Defines the space the skewness is taken on.
                It is ignored if `poly` is a distribution.
        **kws (optional) : Extra keywords passed to dist.mom.

    Returns:
        (ndarray) : Element for element skewness along `poly`, where
                `skewness.shape==poly.shape`.

    Examples:
        >>> x = cp.variable()
        >>> Z = cp.Gamma()
        >>> print(cp.Skew(Z))
        2.0
    """
    if isinstance(poly, cp.dist.Dist):
        x = cp.poly.variable(len(poly))
        poly, dist = x, poly
    else:
        poly = cp.poly.Poly(poly)

    if poly.dim < len(dist):
        cp.poly.setdim(poly, len(dist))

    shape = poly.shape
    poly = cp.poly.flatten(poly)

    m1 = cp.E(poly, dist)
    m2 = cp.E(poly**2, dist)
    m3 = cp.E(poly**3, dist)
    out = (m3-3*m2*m1+2*m1**3)/(m2-m1**2)**1.5

    out = np.reshape(out, shape)
    return out
Example #28
    def getSamples(self, jdist, include_derivs=False):
        n_rv = cp.E(jdist).shape
        pert = np.zeros(n_rv, dtype=self.data_type)
        # Get the all the function values for the given set of samples

        for i in range(0,self.num_samples):
            for j in self.QoI_dict:
                QoI_func = self.QoI_dict[j]['QoI_func']
                self.QoI_dict[j]['fvals'][i,:] = QoI_func(self.samples[:,i], pert)
                if include_derivs == True:
                    for k in self.QoI_dict[j]['deriv_dict']:
                        dQoI_func = self.QoI_dict[j]['deriv_dict'][k]['dQoI_func']
                        # print('\n j = ', j)
                        # print('self.num_samples = ', self.num_samples)
                        # print('dQoI = ', dQoI_func(self.samples[:,i], pert))
                        self.QoI_dict[j]['deriv_dict'][k]['fvals'][i,:] = dQoI_func(self.samples[:,i], pert)
Example #29
def _gc_correlation_pairwise(distributions, rho, seed=123, num_draws=100000):

    assert len(distributions) == 2

    arg_1 = np.prod(cp.E(distributions))
    arg_2 = np.sqrt(np.prod(cp.Var(distributions)))
    arg = (rho * arg_2 + arg_1)

    kwargs = dict()
    kwargs["args"] = (arg, distributions, seed, num_draws)
    kwargs["bounds"] = (-0.99, 0.99)
    kwargs["method"] = "bounded"

    out = optimize.minimize_scalar(_criterion, **kwargs)
    assert out["success"]

    return out["x"]
Example #30
def calculate_uqsa_measures(joint_dist, polynomial, alpha=5):
    """Use chaospy to calculate uncertainty and sensitivity measures."""
    dists = joint_dist
    mean = cp.E(polynomial, dists)
    var = cp.Var(polynomial, dists)
    std = cp.Std(polynomial, dists)
    conInt = cp.Perc(polynomial, [alpha / 2., 100 - alpha / 2.], joint_dist)
    sens_m = cp.Sens_m(polynomial, dists)
    sens_m2 = cp.Sens_m2(polynomial, dists)
    sens_t = cp.Sens_t(polynomial, dists)
    return dict(mean=mean,
                var=var,
                std=std,
                conInt=conInt,
                sens_m=sens_m,
                sens_m2=sens_m2,
                sens_t=sens_t)
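A possible end-to-end use of calculate_uqsa_measures: fit a polynomial chaos surrogate by pseudo-spectral projection and then extract the statistics and Sobol indices. The model and input distributions here are stand-ins, not taken from the original code:

import numpy as np
import chaospy as cp

joint_dist = cp.J(cp.Uniform(0.5, 1.5), cp.Normal(0.0, 0.1))

def model(sample):
    a, b = sample
    return a * np.exp(-b)  # stand-in scalar model

nodes, weights = cp.generate_quadrature(4, joint_dist, rule="Gaussian")
expansion = cp.orth_ttr(4, joint_dist)
evaluations = [model(node) for node in nodes.T]
polynomial = cp.fit_quadrature(expansion, nodes, weights, evaluations)

measures = calculate_uqsa_measures(joint_dist, polynomial, alpha=5)
print(measures["mean"], measures["std"])
print(measures["sens_m"])  # first-order Sobol indices, one per input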