Example #1
from functools import partial

import chaospy as cp
import numpy as np


def _gc_correlation_pairwise(
    distributions,
    rho,
    order=15,
    force_calc=False,
):
    assert len(distributions) == 2

    # Check whether this pair has a known closed-form correction factor
    special_dist_result = _special_dist(distributions)
    if isinstance(special_dist_result, bool):
        check_success = False
    else:
        f = special_dist_result
        check_success = True

    # Use the closed-form factor unless a full calculation is forced
    if not force_calc and check_success:
        result = rho * f
    else:
        arg_1 = np.prod(cp.E(distributions))
        arg_2 = np.sqrt(np.prod(cp.Var(distributions)))
        arg = rho * arg_2 + arg_1

        kwargs = dict()
        kwargs["distributions"] = distributions
        kwargs["order"] = order
        kwargs["arg"] = arg

        grid = np.linspace(-0.99, 0.99, num=199, endpoint=True)
        v_p_criterion = np.vectorize(partial(_criterion, **kwargs))
        result = grid[np.argmin(v_p_criterion(grid))]

    return result
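The helper `_criterion` is not included in the excerpt. A minimal sketch of the kind of objective the grid search above could be minimizing — a quadrature estimate of the product moment under a Gaussian copula at the candidate correlation — where the body, not shown in the source, is entirely an assumption:

import chaospy as cp
import numpy as np

def _criterion(rho_c, distributions, order, arg):
    """Hypothetical objective: squared distance between the product moment
    E[X1 * X2] under a Gaussian copula with correlation rho_c and the
    target value `arg`."""
    standard_normal = cp.Normal(0, 1)
    joint_normal = cp.J(standard_normal, standard_normal)
    nodes, weights = cp.generate_quadrature(order, joint_normal, rule="gaussian")
    z1, z2 = nodes
    # Correlate the normals, then map through the marginal inverse CDFs
    z2_corr = rho_c * z1 + np.sqrt(1 - rho_c**2) * z2
    x1 = distributions[0].inv(standard_normal.fwd(z1))
    x2 = distributions[1].inv(standard_normal.fwd(z2_corr))
    return (np.sum(weights * x1 * x2) - arg) ** 2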
Example #2
def test_constant_expected():
    """Test if polynomial constant behave as expected."""
    distribution = chaospy.J(chaospy.Uniform(-1.2, 1.2),
                             chaospy.Uniform(-2.0, 2.0))
    const = chaospy.polynomial(7.)
    assert chaospy.E(const, distribution[0]) == const
    assert chaospy.E(const, distribution) == const
    assert chaospy.Var(const, distribution) == 0.
def calculate_uqsa_measures(joint_dist, polynomial, alpha=5):
    """ Use chaospy to calculate appropriate indices of uq and sa"""
    dists = joint_dist
    mean = cp.E(polynomial, dists)
    var = cp.Var(polynomial, dists)
    std = cp.Std(polynomial, dists)
    conInt = cp.Perc(polynomial, [alpha / 2., 100 - alpha / 2.], joint_dist)
    sens_m = cp.Sens_m(polynomial, dists)
    sens_m2 = cp.Sens_m2(polynomial, dists)
    sens_t = cp.Sens_t(polynomial, dists)
    return dict(mean=mean,
                var=var,
                std=std,
                conInt=conInt,
                sens_m=sens_m,
                sens_m2=sens_m2,
                sens_t=sens_t)
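A minimal usage sketch for `calculate_uqsa_measures`; the joint distribution, toy model, and sample count below are illustrative stand-ins, not from the source:

import chaospy as cp
import numpy as np

joint_dist = cp.J(cp.Uniform(0.5, 1.5), cp.Normal(0.0, 0.1))
expansion = cp.generate_expansion(3, joint_dist)
samples = joint_dist.sample(200, rule="sobol")
evaluations = np.sin(samples[0]) + samples[1] ** 2  # toy model
polynomial = cp.fit_regression(expansion, samples, evaluations)

measures = calculate_uqsa_measures(joint_dist, polynomial, alpha=5)
print(measures["mean"], measures["std"], measures["sens_m"])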
Example #4
import chaospy as cp
import numpy as np
from scipy import optimize


def _gc_correlation_pairwise(distributions, rho, seed=123, num_draws=100000):

    assert len(distributions) == 2

    arg_1 = np.prod(cp.E(distributions))
    arg_2 = np.sqrt(np.prod(cp.Var(distributions)))
    arg = (rho * arg_2 + arg_1)

    kwargs = dict()
    kwargs["args"] = (arg, distributions, seed, num_draws)
    kwargs["bounds"] = (-0.99, 0.99)
    kwargs["method"] = "bounded"

    out = optimize.minimize_scalar(_criterion, **kwargs)
    assert out["success"]

    return out["x"]
Example #5
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobols_first',
             'sobols_second', 'sobols_total', 'correlation_matrices',
             'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get the Polynomial
        P = self.sampler.P

        # Get the PCE variant to use (regression or projection)
        regression = self.sampler.regression

        # Compute nodes (and weights)
        if regression:
            nodes = cp.generate_samples(order=self.sampler.n_samples,
                                        domain=self.sampler.distribution,
                                        rule=self.sampler.rule)
        else:
            nodes, weights = cp.generate_quadrature(
                order=self.sampler.quad_order,
                dist=self.sampler.distribution,
                rule=self.sampler.rule,
                sparse=self.sampler.quad_sparse,
                growth=self.sampler.quad_growth)

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame.run_id.unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame['run_id'] == run_id][k]
                samples[k].append(data.values)

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                if samples[k][0].dtype == object:
                    for i in range(self.sampler.count):
                        samples[k][i] = samples[k][i].astype("float64")
                fit = cp.fit_regression(P, nodes, samples[k], "T")
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles (Pxx)
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            ipar = 0
            i = 0
            for param_name in self.sampler.vary.get_keys():
                j = self.sampler.params_size[ipar]
                sobols_first_dict[param_name] = sobols_first_narr[i:i + j]
                sobols_second_dict[param_name] = sobols_second_narr[i:i + j]
                sobols_total_dict[param_name] = sobols_total_narr[i:i + j]
                i += j
                ipar += 1
            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return results
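The two fitting routes in `analyse` condense to the following pattern; the distribution, model, and orders here are stand-ins, not values from the sampler:

import chaospy as cp
import numpy as np

dist = cp.J(cp.Uniform(1, 2), cp.Normal(0, 1))
P = cp.generate_expansion(4, dist)

def model(a, b):
    return a * np.exp(-b)

# Regression (point collocation): random nodes, least-squares fit
nodes = cp.generate_samples(order=100, domain=dist, rule="latin_hypercube")
fit_reg = cp.fit_regression(P, nodes, model(*nodes))

# Projection (pseudo-spectral): quadrature nodes and weights
nodes, weights = cp.generate_quadrature(5, dist, rule="gaussian")
fit_proj = cp.fit_quadrature(P, nodes, weights, model(*nodes))

print(cp.E(fit_reg, dist), cp.E(fit_proj, dist))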
        # Create surrogate models:
        # u_hat(x, t; q) = sum_n C_n(x) * P_n(q)
        qoi1_hat = cp.fit_regression(Polynomials, nodes, solves1, rule=rgm_rule)
        qoi2_hat = cp.fit_regression(Polynomials, nodes, solves2, rule=rgm_rule)
        qoi3_hat = cp.fit_regression(Polynomials, nodes, solves3, rule=rgm_rule)

        # output qoi metrics
        mean = cp.E(qoi1_hat, joint_KL)
        var = cp.Var(qoi1_hat, joint_KL)
        std = np.sqrt(var)
        cv = np.divide(np.array(std), np.array(mean))
        kurt = cp.Kurt(qoi1_hat, joint_KL)
        skew = cp.Skew(qoi1_hat, joint_KL)
        metrics = np.array([mean, std, var, cv, kurt, skew]).T
        rawdata = np.array(qoi1_hat(*joint_KL.sample(10**5))).T
        prodata = np.column_stack((qoi_names, metrics.astype(object)))

        # save qoi results
        txt_file.write("Pressure (kPa) = %f \n" % pressure)
        np.savetxt(txt_file, prodata,
                   fmt=['%s', '%.8f', '%.8f', '%.8f', '%.8f', '%.8f', '%.8f'],
                   delimiter="  ",
                   header="qoi, mean, stdv, vari, cova, kurt, skew")
        txt_file.write("\n")
        txt_file.write("\n")
def test_galerkin_variance(galerkin_approx, joint, true_variance):
    assert numpy.allclose(chaospy.Var(galerkin_approx, joint), true_variance, rtol=1e-12)
Example #8
    def get_var(self):
        # Closed-form equivalent: np.sum(self.coefficients**2) - self.get_mean()**2
        return cp.Var(self.f_approx, self.distribution)
def test_spectral_variance(spectral_approx, joint, true_variance):
    assert numpy.allclose(chaospy.Var(spectral_approx, joint), true_variance)
def test_regression_variance(linear_model, joint, true_variance):
    assert numpy.allclose(chaospy.Var(linear_model, joint),
                          true_variance,
                          rtol=3e-1)
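The headerless script below compares plain Monte Carlo against two PCE routes, but `u`, `x`, and `dist` are defined outside the excerpt. A stand-in setup in the spirit of the classic chaospy exponential-decay example (all values assumed):

import numpy as np
import chaospy as cp

x = np.linspace(0, 10, 100)
dist = cp.J(cp.Uniform(0.1, 0.2), cp.Uniform(8, 10))

def u(x, a, I):
    # Toy exponential-decay model
    return I * np.exp(-a * x)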
samples = dist.sample(10**5)
u_mc = [u(x, *s) for s in samples.T]

mean = np.mean(u_mc, 0)
var = np.var(u_mc, 0)

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
order = 5
P, norms = cp.orth_ttr(order, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1, dist, rule="G")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

mean = cp.E(U_hat, dist)
var = cp.Var(U_hat, dist)

## Polynomial chaos expansion
## using Point collocation method and quasi-random samples
order = 5
P = cp.orth_ttr(order, dist)
nodes = dist.sample(2 * len(P), "M")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")

mean = cp.E(U_hat, dist)
var = cp.Var(U_hat, dist)

## Polynomial chaos expansion
## using intrusive Galerkin method
Example #12
import numpy as np
import chaospy as cp

dist_a = cp.Uniform(0, 0.001)
dist_I = cp.Uniform(10, 16)
dist = cp.J(dist_a, dist_I)

t = np.linspace(0, 1200, 10)

# polynomial chaos expansion
# using the pseudo-spectral method and Gaussian quadrature
ordre = 5
P, norms = cp.orth_ttr(ordre, dist, retall=True)
nodes, weights = cp.generate_quadrature(ordre + 1, dist, rule="G")
solves = [u(t, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

E = cp.E(U_hat, dist)
Var = cp.Var(U_hat, dist)
print('Polynomial chaos using the pseudo-spectral method')
print('and Gaussian quadrature:')
print('E : ', E)
print('Var : ', Var)

# polynomial chaos expansion
# using the point-collocation method and pseudo-random samples
ordre = 5
P = cp.orth_ttr(ordre, dist)
nodes = dist.sample(2 * len(P), "M")
solves = [u(t, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")

E = cp.E(U_hat, dist)
Var = cp.Var(U_hat, dist)
Example #13
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobols_first',
             'sobols_second', 'sobols_total', 'correlation_matrices',
             'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame[('run_id', 0)].unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
                samples[k].append(data.values.flatten())

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit = cp.fit_regression(P, nodes, samples[k])
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles: 10% and 90%
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            for i, param_name in enumerate(self.sampler.vary.vary_dict):
                sobols_first_dict[param_name] = sobols_first_narr[i]
                sobols_second_dict[param_name] = sobols_second_narr[i]
                sobols_total_dict[param_name] = sobols_total_narr[i]

            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #14
]:
    sample_scheme = 'R'
    # create samples
    samples = jpdf.sample(Ns, sample_scheme)
    # create orthogonal polynomials
    orthogonal_polynomials = cp.orth_ttr(polynomial_order, jpdf)
    # evaluate the model for all samples
    Y_area = model(pressure_range, samples)
    # polynomial chaos expansion
    polynomial_expansion = cp.fit_regression(orthogonal_polynomials, samples,
                                             Y_area.T)

    # calculate statistics
    plotMeanConfidenceAlpha = 5
    expected_value = cp.E(polynomial_expansion, jpdf)
    variance = cp.Var(polynomial_expansion, jpdf)
    standard_deviation = cp.Std(polynomial_expansion, jpdf)
    prediction_interval = cp.Perc(
        polynomial_expansion,
        [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
        jpdf)
    print('{:2.5f} | {:2.5f} : {}'.format(
        np.mean(expected_value) * unit_m2_cm2,
        np.mean(standard_deviation) * unit_m2_cm2, name))

    # compute sensitivity indices
    S = cp.Sens_m(polynomial_expansion, jpdf)
    ST = cp.Sens_t(polynomial_expansion, jpdf)

    plt.figure('mean')
    plt.plot(pressure_range * unit_pa_mmhg,
Example #15
def test_regression_variance(collocation_model, joint, true_variance):
    assert numpy.allclose(chaospy.Var(collocation_model, joint),
                          true_variance,
                          rtol=1e-5)
Example #16
def test_descriptives():
    dist = cp.Iid(cp.Normal(), dim)
    orth = cp.expansion.stieltjes(order, dist)
    cp.E(orth, dist)
    cp.Var(orth, dist)
    cp.Cov(orth, dist)
    # perform the full-grid pseudo-spectral approximation
    for j, n in enumerate(nodes_full.T):
        # each n is a vector with 5 components
        # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
        init_cond = n[3], n[4]
        args = n[0], n[1], n[2], w
        sol_odeint_full[j] = discretize_oscillator_odeint(
            model, atol, rtol, init_cond, args, t, t_interest)

    # obtain the gpc approximation
    sol_gpc_full_approx = cp.fit_quadrature(P, nodes_full, weights_full,
                                            sol_odeint_full)

    # compute statistics
    mean_full = cp.E(sol_gpc_full_approx, distr_5D)
    var_full = cp.Var(sol_gpc_full_approx, distr_5D)
    ##################################################################

    #################### sparse grid computations ####################
    # get the sparse quadrature nodes and weights
    nodes_sparse, weights_sparse = cp.generate_quadrature(quad_deg_1D,
                                                          distr_5D,
                                                          rule='G',
                                                          sparse=True)
    # create vector to save the solution
    sol_odeint_sparse = np.zeros(len(nodes_sparse.T))

    # perform sparse pseudo-spectral approximation
    for j, n in enumerate(nodes_sparse.T):
        # each n is a vector with 5 components
        # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
Example #18
import numpy as np
import chaospy as cp

dist_a = cp.Uniform(0, 0.001)
dist_I = cp.Uniform(10, 16)
dist = cp.J(dist_a, dist_I)

t = np.linspace(0, 1200, 10)

# polynomial chaos expansion
# using the pseudo-spectral method and Gaussian quadrature
ordre = 5
P, norms = cp.orth_ttr(ordre, dist, retall=True)
nodes, weights = cp.generate_quadrature(ordre+1, dist, rule="G")
solves = [u(t, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

E1 = cp.E(U_hat, dist)
Var1 = cp.Var(U_hat, dist)
print('Polynomial chaos using the pseudo-spectral method')
print('and Gaussian quadrature:')
print('E : ', E1)
print('Var : ', Var1)

u_up1 = E1 + np.sqrt(Var1)
u_dw1 = E1 - np.sqrt(Var1)

# polynomial chaos expansion
# using the point-collocation method and pseudo-random samples
ordre = 5
P = cp.orth_ttr(ordre, dist)
nodes = dist.sample(2*len(P), "M")
solves = [u(t, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")
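The excerpt ends at the regression fit. Completing the second method symmetrically with the first would plausibly look like this (an assumed continuation, not shown in the source):

E2 = cp.E(U_hat, dist)
Var2 = cp.Var(U_hat, dist)
print('Polynomial chaos using the point-collocation method')
print('and pseudo-random samples:')
print('E : ', E2)
print('Var : ', Var2)

u_up2 = E2 + np.sqrt(Var2)
u_dw2 = E2 - np.sqrt(Var2)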
Example #19
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : pandas DataFrame
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the sobol indices and other information.
        """
        def sobols(P, coefficients):
            """ Utility routine to calculate sobols based on coefficients
            """
            A = np.array(P.coefficients) != 0
            multi_indices = np.array(
                [P.exponents[A[:, i]].sum(axis=0) for i in range(A.shape[1])])
            sobol_mask = multi_indices != 0
            _, index = np.unique(sobol_mask, axis=0, return_index=True)
            index = np.sort(index)
            sobol_idx_bool = sobol_mask[index]
            sobol_idx_bool = np.delete(sobol_idx_bool, [0], axis=0)
            n_sobol_available = sobol_idx_bool.shape[0]
            if len(coefficients.shape) == 1:
                n_out = 1
            else:
                n_out = coefficients.shape[1]
            n_coeffs = coefficients.shape[0]
            sobol_poly_idx = np.zeros([n_coeffs, n_sobol_available])
            for i_sobol in range(n_sobol_available):
                sobol_poly_idx[:, i_sobol] = np.all(
                    sobol_mask == sobol_idx_bool[i_sobol], axis=1)
            sobol = np.zeros([n_sobol_available, n_out])
            for i_sobol in range(n_sobol_available):
                sobol[i_sobol] = np.sum(np.square(
                    coefficients[sobol_poly_idx[:, i_sobol] == 1]),
                                        axis=0)
            idx_sort_descend_1st = np.argsort(sobol[:, 0], axis=0)[::-1]
            sobol = sobol[idx_sort_descend_1st, :]
            sobol_idx_bool = sobol_idx_bool[idx_sort_descend_1st]
            sobol_idx = [0 for _ in range(sobol_idx_bool.shape[0])]
            for i_sobol in range(sobol_idx_bool.shape[0]):
                sobol_idx[i_sobol] = np.array(
                    [i for i, x in enumerate(sobol_idx_bool[i_sobol, :]) if x])
            var = ((coefficients[1:]**2).sum(axis=0))
            sobol = sobol / var
            return sobol, sobol_idx, sobol_idx_bool

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
            'fit': {},
            'Fourier_coefficients': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe

        samples = {k: [] for k in qoi_cols}
        for k in qoi_cols:
            samples[k] = data_frame[k].values

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit, fc = cp.fit_regression(P, nodes, samples[k], retall=1)
            else:
                fit, fc = cp.fit_quadrature(P,
                                            nodes,
                                            weights,
                                            samples[k],
                                            retall=1)
            results['fit'][k] = fit
            results['Fourier_coefficients'][k] = fc

            # Percentiles: 1%, 10%, 50%, 90% and 99%
            P01, P10, P50, P90, P99 = cp.Perc(
                fit, [1, 10, 50, 90, 99], self.sampler.distribution).squeeze()
            results['percentiles'][k] = {
                'p01': P01,
                'p10': P10,
                'p50': P50,
                'p90': P90,
                'p99': P99
            }

            if self.sampling:  # use chaospy's sampling method

                # Statistical moments
                mean = cp.E(fit, self.sampler.distribution)
                var = cp.Var(fit, self.sampler.distribution)
                std = cp.Std(fit, self.sampler.distribution)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
                sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
                sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
                sobols_first_dict = {}
                sobols_second_dict = {}
                sobols_total_dict = {}
                for i, param_name in enumerate(self.sampler.vary.vary_dict):
                    sobols_first_dict[param_name] = sobols_first_narr[i]
                    sobols_second_dict[param_name] = sobols_second_narr[i]
                    sobols_total_dict[param_name] = sobols_total_narr[i]

                results['sobols_first'][k] = sobols_first_dict
                results['sobols_second'][k] = sobols_second_dict
                results['sobols_total'][k] = sobols_total_dict

            else:  # use PCE coefficients

                # Statistical moments
                mean = fc[0]
                var = np.sum(fc[1:]**2, axis=0)
                std = np.sqrt(var)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobol, sobol_idx, _ = sobols(P, fc)
                varied = [_ for _ in self.sampler.vary.get_keys()]
                S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                S2 = {
                    _: np.zeros((len(varied), sobol.shape[-1]))
                    for _ in varied
                }
                for n, si in enumerate(sobol_idx):
                    if len(si) == 1:
                        v = varied[si[0]]
                        S1[v] = sobol[n]
                    elif len(si) == 2:
                        v1 = varied[si[0]]
                        v2 = varied[si[1]]
                        S2[v1][si[1]] = sobol[n]
                        S2[v2][si[0]] = sobol[n]
                    for i in si:
                        ST[varied[i]] += sobol[n]

                results['sobols_first'][k] = S1
                results['sobols_second'][k] = S2
                results['sobols_total'][k] = ST

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
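The `sampling=False` branch reads the moments straight off the Fourier coefficients. A quick check of those identities, assuming an orthonormal basis (the mean is the zeroth coefficient; the variance is the sum of the squared higher ones):

import numpy as np
import chaospy as cp

dist = cp.Normal(0, 1)
P = cp.generate_expansion(4, dist, normed=True)  # orthonormal basis
nodes, weights = cp.generate_quadrature(8, dist, rule="gaussian")
evals = nodes[0] ** 3 + 2 * nodes[0]
fit, fc = cp.fit_quadrature(P, nodes, weights, evals, retall=True)

assert np.isclose(fc[0], cp.E(fit, dist))
assert np.isclose(np.sum(fc[1:] ** 2), cp.Var(fit, dist))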
        orth_poly = cp.orth_ttr(N[i], distr_unif_w, normed=True)

        # evaluate the model at all quadrature nodes and keep y(10), i.e. the last entry
        y_out = [discretize_oscillator_odeint(model, init_cond, x_axis, (c, k, f, node), atol, rtol)[-1] for node in nodes[0]]

        # find the generalized polynomial chaos expansion and its coefficients
        gPC_m, expansion_coeff = cp.fit_quadrature(orth_poly, nodes, weights, y_out, retall=True)

        # gPC_m is the best polynomial approximation of the model output
        print(f'Expansion coeff chaospy: {expansion_coeff}')
        print(f'The best polynomial of degree {n} that approximates f(x): {cp.around(gPC_m, 1)}')
        print(f'Expansion coeff [0] = {expansion_coeff[0]}')

        mu[i] = cp.E(gPC_m, distr_unif_w)
        V[i] = cp.Var(gPC_m, distr_unif_w)

        print("mu = %.8f, V = %.8f" % (mu[i], V[i]))


# Manual calculation of the expansion coefficients.
# Note: if done in the same loop, the mean appears to change only because the
# loop runs over K[i] without any further action.
    print("____________Manual expansion coefficients__________")

    for i, n in enumerate(N):
        # generate Gauss quadrature nodes and weights for distr_unif_w
        nodes, weights = cp.generate_quadrature(K[i], distr_unif_w, rule="G")

        # build the orthonormal polynomial basis
        orth_poly = cp.orth_ttr(N[i], distr_unif_w, normed=True)
Example #21
def test_descriptives():
    dist = cp.Iid(cp.Normal(), dim)
    orth = cp.orth_ttr(order, dist)
    cp.E(orth, dist)
    cp.Var(orth, dist)
    cp.Cov(orth, dist)
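Note: `cp.orth_ttr` is the older name for the three-terms-recurrence (Stieltjes) constructor; recent chaospy releases spell it `cp.expansion.stieltjes`, as Example #16 above does.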
def test_lagrange_variance(lagrange_approximation, joint, true_variance):
    assert numpy.allclose(chaospy.Var(lagrange_approximation, joint),
                          true_variance,
                          rtol=1e-2)