Example #1
def objfunc(xdict):
    dv = xdict['xvars']  # Get the design variable out
    UQObj.QoI.p['oas_example1.wing.twist_cp'] = dv
    obj_func = UQObj.QoI.eval_QoI
    con_func = UQObj.QoI.eval_ConstraintQoI
    funcs = {}

    # Objective function

    # Full integration
    mu_j = collocation_obj.normal.mean(cp.E(UQObj.jdist), cp.Std(UQObj.jdist),
                                       obj_func)
    var_j = collocation_obj.normal.variance(obj_func, UQObj.jdist, mu_j)
    # # Reduced integration
    # mu_j = collocation_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)
    # var_j = collocation_obj.normal.reduced_variance(obj_func, UQObj.jdist, UQObj.dominant_space, mu_j)
    funcs['obj'] = mu_j + 2 * np.sqrt(var_j)

    # Constraint function
    # Full integration
    funcs['con'] = collocation_con.normal.mean(cp.E(UQObj.jdist),
                                               cp.Std(UQObj.jdist), con_func)
    # # Reduced integration
    # funcs['con'] = collocation_con.normal.reduced_mean(con_func, UQObj.jdist, UQObj.dominant_space)
    fail = False
    return funcs, fail
Example #2
def sens(xdict, funcs):
    dv = xdict['xvars']  # Get the design variable out
    UQObj.QoI.p['oas_example1.wing.twist_cp'] = dv
    obj_func = UQObj.QoI.eval_ObjGradient
    con_func = UQObj.QoI.eval_ConstraintQoIGradient
    funcsSens = {}

    # Objective function
    # Full integration
    g_mu_j = collocation_grad_obj.normal.mean(cp.E(UQObj.jdist),
                                              cp.Std(UQObj.jdist), obj_func)
    g_var_j = collocation_grad_obj.normal.variance(obj_func, UQObj.jdist,
                                                   g_mu_j)
    # # Reduced integration
    # g_mu_j = collocation_grad_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)
    # g_var_j = collocation_grad_obj.normal.reduced_variance(obj_func, UQObj.jdist, UQObj.dominant_space, g_mu_j)

    funcsSens['obj', 'xvars'] = g_mu_j + 2 * np.sqrt(
        g_var_j
    )  # collocation_grad_obj.normal.reduced_mean(obj_func, UQObj.jdist, UQObj.dominant_space)

    # Constraint function
    # Full integration
    funcsSens['con', 'xvars'] = collocation_grad_con.normal.mean(
        cp.E(UQObj.jdist), cp.Std(UQObj.jdist), con_func)
    # # Reduced integration
    # funcsSens['con', 'xvars'] = collocation_grad_con.normal.reduced_mean(con_func, UQObj.jdist, UQObj.dominant_space)
    fail = False
    return funcsSens, fail
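Both callbacks follow the pyOptSparse convention of returning (funcs, fail). A minimal sketch of how they might be wired into an optimizer; the variable-group size, bounds, constraint count, and optimizer choice are assumptions:

from pyoptsparse import OPT, Optimization

optProb = Optimization('robust_twist_optimization', objfunc)
optProb.addVarGroup('xvars', 5, lower=-10.0, upper=10.0, value=0.0)  # size assumed
optProb.addObj('obj')
optProb.addConGroup('con', 1, lower=0.0, upper=0.0)  # constraint count assumed
opt = OPT('SLSQP')
sol = opt(optProb, sens=sens)  # gradients supplied by the sens callback above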
Example #3
    def __init__(self, uq_systemsize):

        mean_v = 248.136  # mean freestream speed
        mean_alpha = 5  # mean angle of attack, degrees
        mean_Ma = 0.84  # mean Mach number
        mean_re = 1.e6  # mean Reynolds number
        mean_rho = 0.38  # mean air density
        mean_cg = np.zeros(3)  # mean center-of-gravity location

        std_dev = np.diag([0.2, 0.01])  # np.diag([1.0, 0.2, 0.01, 1.e2, 0.01])
        rv_dict = {  # 'v' : mean_v,
            'alpha': mean_alpha,
            'Mach_number': mean_Ma,
            # 're' : mean_re,
            # 'rho' : mean_rho,
        }
        self.QoI = examples.OASAerodynamicWrapper(uq_systemsize, rv_dict)
        self.jdist = cp.Normal(self.QoI.rv_array, std_dev)
        self.dominant_space = DimensionReduction(
            n_arnoldi_sample=uq_systemsize + 1, exact_Hessian=False)
        self.dominant_space.getDominantDirections(self.QoI,
                                                  self.jdist,
                                                  max_eigenmodes=1)

        # print('iso_eigenvals = ', self.dominant_space.iso_eigenvals)
        # print('iso_eigenvecs = ', '\n', self.dominant_space.iso_eigenvecs)
        # print('dominant_indices = ', self.dominant_space.dominant_indices)
        # print('ratio = ', abs(self.dominant_space.iso_eigenvals[0] / self.dominant_space.iso_eigenvals[1]))
        print('std_dev = ', cp.Std(self.jdist), '\n')
        print('cov = ', cp.Cov(self.jdist), '\n')
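Note that std_dev above is a diagonal matrix of standard deviations passed to cp.Normal. In current chaospy a joint normal over independent variables is usually built with cp.MvNormal, which expects a covariance matrix; a minimal sketch for the two retained variables (the squaring is the point):

import chaospy as cp
import numpy as np

mu = np.array([5.0, 0.84])  # mean_alpha, mean_Ma from above
cov = np.diag([0.2, 0.01]) ** 2  # covariance = squared standard deviations
jdist = cp.MvNormal(mu, cov)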
Example #4
    def getMCResults4Web(self):
        '''
        Use the fitted surrogate models to compute the mean and standard
        deviation of the CO2 and brine outputs for the web interface.
        The moments are evaluated analytically from the polynomial
        expansions, so no Monte Carlo realizations are required.
        '''
        if (self.co2Model is None):
            self.getMetaModels()

        meanCO2 = cp.E(self.co2Model, self.jointDist)
        stdCO2 = cp.Std(self.co2Model, self.jointDist)

        meanBrine = cp.E(self.brineModel, self.jointDist)
        stdBrine = cp.Std(self.brineModel, self.jointDist)

        return [
            meanCO2[self.nlw:], stdCO2[self.nlw:], meanBrine[self.nlw:],
            stdBrine[self.nlw:]
        ]
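For context, cp.E and cp.Std evaluate the moments of a fitted expansion analytically, so no sampling loop is involved; a self-contained sketch with illustrative names:

import chaospy as cp
import numpy as np

joint_dist = cp.J(cp.Uniform(1, 2), cp.Normal(0, 0.1))
poly = cp.orth_ttr(2, joint_dist)
samples = joint_dist.sample(50)
evals = samples[0] * np.exp(samples[1])  # stand-in model
surrogate = cp.fit_regression(poly, samples, evals)
mean = cp.E(surrogate, joint_dist)
std = cp.Std(surrogate, joint_dist)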
Example #5
    def solve_nonlinear(self, params, unknowns, resids):

        power = params['power']
        method_dict = params['method_dict']
        dist = method_dict['distribution']
        rule = method_dict['rule']
        n = len(power)
        if rule != 'rectangle':
            points, weights = cp.generate_quadrature(order=n - 1,
                                                     domain=dist,
                                                     rule=rule)
        # else:
        #     points, weights = quadrature_rules.rectangle(n, method_dict['distribution'])

        poly = cp.orth_chol(n - 1, dist)
        # poly = cp.orth_bert(n-1, dist)
        # double-check this is giving good orthogonal polynomials
        # print(poly, '\n')
        p2 = cp.outer(poly, poly)
        # print('chol', cp.E(p2, dist))
        norms = np.diagonal(cp.E(p2, dist))
        print('diag', norms)

        expansion, coeff = cp.fit_quadrature(poly,
                                             points,
                                             weights,
                                             power,
                                             retall=True,
                                             norms=norms)
        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)

        mean = cp.E(expansion, dist)
        print('mean cp.E =', mean)
        # mean = sum(power*weights)
        print('mean sum =', sum(power * weights))
        print('mean coeff =', coeff[0])
        std = cp.Std(expansion, dist)

        print(mean)
        print(std)
        print(np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:])))
        # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns['mean'] = mean * hours
        unknowns['std'] = std * hours

        print('In ChaospyStatistics')
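The three printed means agree because, for an orthogonal basis whose first polynomial is the constant 1, the expansion mean is the zeroth coefficient and the variance is sum_k coeff_k**2 * E[poly_k**2] over k >= 1. A standalone check of those identities (model and orders are illustrative):

import chaospy as cp
import numpy as np

dist = cp.Uniform(-1, 1)
points, weights = cp.generate_quadrature(order=8, domain=dist, rule='G')
poly = cp.orth_chol(4, dist)
evals = points[0]**3 + 1.0  # stand-in model
expansion, coeff = cp.fit_quadrature(poly, points, weights, evals, retall=True)
norms = cp.E(poly**2, dist)
assert np.isclose(cp.E(expansion, dist), coeff[0])
assert np.isclose(cp.Var(expansion, dist), np.sum(coeff[1:]**2 * norms[1:]))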
Example #6
def calculate_uqsa_measures(joint_dist, polynomial, alpha=5):
    """ Use chaospy to calculate appropriate indices of uq and sa"""
    dists = joint_dist
    mean = cp.E(polynomial, dists)
    var = cp.Var(polynomial, dists)
    std = cp.Std(polynomial, dists)
    conInt = cp.Perc(polynomial, [alpha / 2., 100 - alpha / 2.], joint_dist)
    sens_m = cp.Sens_m(polynomial, dists)
    sens_m2 = cp.Sens_m2(polynomial, dists)
    sens_t = cp.Sens_t(polynomial, dists)
    return dict(mean=mean,
                var=var,
                std=std,
                conInt=conInt,
                sens_m=sens_m,
                sens_m2=sens_m2,
                sens_t=sens_t)
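A usage sketch for this helper, with a regression-fitted expansion as in the other examples here (model and orders are illustrative):

import chaospy as cp

joint_dist = cp.J(cp.Uniform(0, 1), cp.Normal(0, 1))
samples = joint_dist.sample(200)
evals = samples[0] + 0.5 * samples[1]**2  # stand-in model
polynomial = cp.fit_regression(cp.orth_ttr(3, joint_dist), samples, evals)
measures = calculate_uqsa_measures(joint_dist, polynomial)
print(measures['mean'], measures['std'], measures['sens_m'])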
Example #7
    def check_3sigma_violation(self, rv_arr, jdist):
        mu = cp.E(jdist)
        sigma = cp.Std(jdist)  # Standard deviation
        upper_bound = mu + 3 * sigma
        lower_bound = mu - 3 * sigma
        idx_list = []  # indices of the samples that violate the 3-sigma bounds
        for i in range(self.n_monte_carlo_samples):
            sample = rv_arr[:, i]
            if not (all(sample > lower_bound) and all(sample < upper_bound)):
                idx_list.append(i)

        # print("number of violations = ", len(idx_list))
        # print(idx_list)
        # Delete the arrays from the idx_list
        new_samples = np.delete(rv_arr, idx_list, axis=1)

        return new_samples
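The per-sample loop can also be collapsed into one vectorized mask; an equivalent sketch, assuming rv_arr has shape (n_random_variables, n_samples):

inside = np.all((rv_arr > lower_bound[:, None]) &
                (rv_arr < upper_bound[:, None]), axis=0)
new_samples = rv_arr[:, inside]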
Example #8
    def test_derivatives_scalarQoI(self):

        systemsize = 3
        mu = np.random.rand(systemsize)
        std_dev = np.diag(np.random.rand(systemsize))
        jdist = cp.MvNormal(mu, std_dev)
        # Create QoI Object
        QoI = Paraboloid3D(systemsize)

        # Create the Monte Carlo object
        deriv_dict = {
            'xi': {
                'dQoI_func': QoI.eval_QoIGradient,
                'output_dimensions': systemsize
            }
        }
        QoI_dict = {
            'paraboloid': {
                'QoI_func': QoI.eval_QoI,
                'output_dimensions': 1,
                'deriv_dict': deriv_dict
            }
        }

        nsample = 1000000
        mc_obj = MonteCarlo(nsample, jdist, QoI_dict, include_derivs=True)
        mc_obj.getSamples(jdist, include_derivs=True)
        dmu_j = mc_obj.dmean(jdist, of=['paraboloid'], wrt=['xi'])
        dvar_j = mc_obj.dvariance(jdist, of=['paraboloid'], wrt=['xi'])

        # Analytical dmu_j
        dmu_j_analytical = np.array([100 * mu[0], 50 * mu[1], 2 * mu[2]])
        err = abs(
            (dmu_j['paraboloid']['xi'] - dmu_j_analytical) / dmu_j_analytical)
        self.assertTrue((err < 0.01).all())

        # Analytical dvar_j
        rv_dev = cp.Std(jdist)
        dvar_j_analytical = np.array([(100 * rv_dev[0])**2,
                                      (50 * rv_dev[1])**2, (2 * rv_dev[2])**2])
        err = abs((dvar_j['paraboloid']['xi'] - dvar_j_analytical) /
                  dvar_j_analytical)
        self.assertTrue((err < 0.01).all())
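For reference, the analytical mean derivative above is consistent with a quadratic of the form f(x) = 50*x1**2 + 25*x2**2 + x3**2 (an inference; the Paraboloid3D definition is not shown): under independent normals, E[f] = 50*(mu1**2 + sigma1**2) + 25*(mu2**2 + sigma2**2) + mu3**2 + sigma3**2, so dE[f]/dmu = [100*mu1, 50*mu2, 2*mu3], which is exactly dmu_j_analytical; the dvar_j_analytical entries likewise reuse the squared gradient coefficients 100, 50 and 2.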
Example #9
    def get_truncated_samples(self, jdist):
        mu = cp.E(jdist)
        sigma = cp.Std(jdist)
        upper_bound = mu + 3 * sigma
        lower_bound = mu - 3 * sigma

        # Build one truncated normal per random variable (seven here), then join them
        trunc_dists = [
            cp.Truncnorm(lo=lower_bound[i],
                         up=upper_bound[i],
                         mu=mu[i],
                         sigma=sigma[i]) for i in range(7)
        ]

        trunc_jdist = cp.J(*trunc_dists)

        rv_arr = trunc_jdist.sample(self.n_monte_carlo_samples)
        return rv_arr
Example #10
# Create 3rd order quadrature scheme
nodes, weights = cp.generate_quadrature(order=3,
                                        domain=distribution,
                                        rule="Gaussian")

u0 = 0.3
# Evaluate model at the nodes
x = np.linspace(0, 1, 101)
samples = [model(x, u0, node[0], node[1], node[2]) for node in nodes.T]

# Generate 3rd order orthogonal polynomial expansion
polynomials = cp.orth_ttr(order=3, dist=distribution)

# Create model approximation (surrogate solver)
model_approx = cp.fit_quadrature(polynomials, nodes, weights, samples)

# Model analysis
mean = cp.E(model_approx, distribution)
deviation = cp.Std(model_approx, distribution)

# Plot results
from matplotlib import pyplot as plt
plt.rc("figure", figsize=[8, 6])
plt.fill_between(x, mean - deviation, mean + deviation, color="k", alpha=0.5)
plt.plot(x, mean, "k", lw=2)
plt.xlabel("depth $x$")
plt.ylabel("porosity $u$")
plt.legend(["mean $\pm$ deviation", "mean"])
plt.savefig("ode.pdf")
Example #11
# 4.1 generate quadrature nodes and weights
order = 5
nodes, weights = cp.generate_quadrature(order=order, domain=joint_distribution, rule='G')

# 4.2 evaluate the simple model for all nodes
model_evaluations = nodes[0]+nodes[1]*nodes[0]

# 4.3 use quadrature to generate the polynomial chaos expansion
gpce_quadrature = cp.fit_quadrature(poly, nodes, weights, model_evaluations)
# end example spectral projection

# example uq
exp_reg = cp.E(gpce_regression, joint_distribution)
exp_ps = cp.E(gpce_quadrature, joint_distribution)

std_reg = cp.Std(gpce_regression, joint_distribution)
std_ps = cp.Std(gpce_quadrature, joint_distribution)

prediction_interval_reg = cp.Perc(gpce_regression, [5, 95], joint_distribution)
prediction_interval_ps = cp.Perc(gpce_quadrature, [5, 95], joint_distribution)

print("Expected values   Standard deviation            90 % Prediction intervals\n")
print(' E_reg |  E_ps     std_reg |  std_ps                pred_reg |  pred_ps')
print('  {} | {}       {:>6.3f} | {:>6.3f}       {} | {}'.format(exp_reg,
                                                                  exp_ps,
                                                                  std_reg,
                                                                  std_ps,
                                                                  ["{:.3f}".format(p) for p in prediction_interval_reg],
                                                                  ["{:.3f}".format(p) for p in prediction_interval_ps]))
# end example uq
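gpce_regression is referenced above but constructed elsewhere in the original tutorial; a minimal point-collocation sketch consistent with the model in step 4.2 (the sample count is illustrative):

# regression (point collocation) counterpart, a sketch
samples = joint_distribution.sample(100)
model_evaluations = samples[0] + samples[1]*samples[0]
gpce_regression = cp.fit_regression(poly, samples, model_evaluations)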
Example #12
def do_gpce(forward_problem,
            starting_parameters,
            first_V,
            second_V,
            distribution,
            quad_order=20,
            poly_order=30,
            plot=True):

    # Do the pseudo spectral projection
    abscissas, weights = chaospy.generate_quadrature(quad_order,
                                                     distribution,
                                                     rule="gaussian")

    polynomial_expansion = chaospy.generate_expansion(poly_order, distribution)
    evaluations = [forward_problem(abscissa[0]) for abscissa in abscissas.T]

    foo_approx = chaospy.fit_quadrature(polynomial_expansion, abscissas,
                                        weights, evaluations)
    expected = chaospy.E(foo_approx, distribution)
    std = chaospy.Std(foo_approx, distribution)

    print(expected, std)
    print(foo_approx)
    if plot:
        param_mean = distribution.mom(1)
        # std from raw moments: sqrt(E[X^2] - E[X]^2)
        param_std = math.sqrt(distribution.mom(2) - param_mean ** 2)
        xs = np.linspace(param_mean - 5 * param_std, param_mean + 5 * param_std, 50)
        fig, ax = plt.subplots()
        ax.set_xlabel("p8")
        ax.axvline(distribution.mom(1),
                   linestyle="--",
                   label="mean parameter value")
        ax.plot(xs, [foo_approx(x)[0].sum() for x in xs],
                label="gpce approximation",
                color="blue")
        ax.plot(xs, [forward_problem(x)[0] for x in xs],
                label="true value",
                color="red")
        ax.set_ylabel("current /nA")

        ax2 = ax.twinx()
        x2s = np.linspace(xs[0], xs[-1], 10000)
        ax2.plot(x2s,
                 distribution.pdf(x2s),
                 "--",
                 label="probability density",
                 color="green")

        ax.legend()
        fig.savefig("normal_pdf_{}mV_{}mV.pdf".format(first_V, second_V))

        fig, ax = plt.subplots()
        ax.fill_between(coordinates, expected - std, expected + std, alpha=0.3)
        ax.plot(coordinates, expected)
        ax.set_xlabel("time /ms")
        ax.set_ylabel("current /nA")
        fig.savefig("pseudo_spectral_plot_{}mV_{}mV.pdf".format(
            first_V, second_V))
        print("plotted")

    return expected, std
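A sketch of how do_gpce might be invoked; the distribution parameters, voltages, and the forward_problem callable are assumptions:

dist = chaospy.Normal(0.15, 0.015)  # hypothetical parameter distribution for p8
expected, std = do_gpce(forward_problem, starting_parameters,
                        first_V=-80, second_V=40,
                        distribution=dist,
                        quad_order=10, poly_order=15, plot=False)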
Example #13
    sample_scheme = 'R'
    # create samples
    samples = jpdf.sample(Ns, sample_scheme)
    # create orthogonal polynomials
    orthogonal_polynomials = cp.orth_ttr(polynomial_order, jpdf)
    # evaluate the model for all samples
    Y_area = model(pressure_range, samples)
    # polynomial chaos expansion
    polynomial_expansion = cp.fit_regression(orthogonal_polynomials, samples,
                                             Y_area.T)

    # calculate statistics
    plotMeanConfidenceAlpha = 5
    expected_value = cp.E(polynomial_expansion, jpdf)
    variance = cp.Var(polynomial_expansion, jpdf)
    standard_deviation = cp.Std(polynomial_expansion, jpdf)
    prediction_interval = cp.Perc(
        polynomial_expansion,
        [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
        jpdf)
    print('{:2.5f} | {:2.5f} : {}'.format(
        np.mean(expected_value) * unit_m2_cm2,
        np.mean(standard_deviation) * unit_m2_cm2, name))

    # compute sensitivity indices
    S = cp.Sens_m(polynomial_expansion, jpdf)
    ST = cp.Sens_t(polynomial_expansion, jpdf)

    plt.figure('mean')
    plt.plot(pressure_range * unit_pa_mmhg,
             expected_value * unit_m2_cm2,
Example #14
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame[('run_id', 0)].unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
                samples[k].append(data.values.flatten())

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit = cp.fit_regression(P, nodes, samples[k])
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles: 10% and 90%
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            for i, param_name in enumerate(self.sampler.vary.vary_dict):
                sobols_first_dict[param_name] = sobols_first_narr[i]
                sobols_second_dict[param_name] = sobols_second_narr[i]
                sobols_total_dict[param_name] = sobols_total_narr[i]

            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #15
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : pandas DataFrame
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the sobol indices and other information.
        """
        def sobols(P, coefficients):
            """ Utility routine to calculate sobols based on coefficients
            """
            A = np.array(P.coefficients) != 0
            multi_indices = np.array(
                [P.exponents[A[:, i]].sum(axis=0) for i in range(A.shape[1])])
            sobol_mask = multi_indices != 0
            _, index = np.unique(sobol_mask, axis=0, return_index=True)
            index = np.sort(index)
            sobol_idx_bool = sobol_mask[index]
            sobol_idx_bool = np.delete(sobol_idx_bool, [0], axis=0)
            n_sobol_available = sobol_idx_bool.shape[0]
            if len(coefficients.shape) == 1:
                n_out = 1
            else:
                n_out = coefficients.shape[1]
            n_coeffs = coefficients.shape[0]
            sobol_poly_idx = np.zeros([n_coeffs, n_sobol_available])
            for i_sobol in range(n_sobol_available):
                sobol_poly_idx[:, i_sobol] = np.all(
                    sobol_mask == sobol_idx_bool[i_sobol], axis=1)
            sobol = np.zeros([n_sobol_available, n_out])
            for i_sobol in range(n_sobol_available):
                sobol[i_sobol] = np.sum(np.square(
                    coefficients[sobol_poly_idx[:, i_sobol] == 1]),
                                        axis=0)
            idx_sort_descend_1st = np.argsort(sobol[:, 0], axis=0)[::-1]
            sobol = sobol[idx_sort_descend_1st, :]
            sobol_idx_bool = sobol_idx_bool[idx_sort_descend_1st]
            sobol_idx = [0 for _ in range(sobol_idx_bool.shape[0])]
            for i_sobol in range(sobol_idx_bool.shape[0]):
                sobol_idx[i_sobol] = np.array(
                    [i for i, x in enumerate(sobol_idx_bool[i_sobol, :]) if x])
            var = ((coefficients[1:]**2).sum(axis=0))
            sobol = sobol / var
            return sobol, sobol_idx, sobol_idx_bool

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
            'fit': {},
            'Fourier_coefficients': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        #        samples = {k: [] for k in qoi_cols}
        #        for run_id in data_frame[('run_id', 0)].unique():
        #            for k in qoi_cols:
        #                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
        #                samples[k].append(data.values.flatten())

        samples = {k: [] for k in qoi_cols}
        for k in qoi_cols:
            samples[k] = data_frame[k].values

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit, fc = cp.fit_regression(P, nodes, samples[k], retall=1)
            else:
                fit, fc = cp.fit_quadrature(P,
                                            nodes,
                                            weights,
                                            samples[k],
                                            retall=1)
            results['fit'][k] = fit
            results['Fourier_coefficients'][k] = fc

            # Percentiles: 1%, 10%, 50%, 90% and 99%
            P01, P10, P50, P90, P99 = cp.Perc(
                fit, [1, 10, 50, 90, 99], self.sampler.distribution).squeeze()
            results['percentiles'][k] = {
                'p01': P01,
                'p10': P10,
                'p50': P50,
                'p90': P90,
                'p99': P99
            }

            if self.sampling:  # use chaospy's sampling method

                # Statistical moments
                mean = cp.E(fit, self.sampler.distribution)
                var = cp.Var(fit, self.sampler.distribution)
                std = cp.Std(fit, self.sampler.distribution)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
                sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
                sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
                sobols_first_dict = {}
                sobols_second_dict = {}
                sobols_total_dict = {}
                for i, param_name in enumerate(self.sampler.vary.vary_dict):
                    sobols_first_dict[param_name] = sobols_first_narr[i]
                    sobols_second_dict[param_name] = sobols_second_narr[i]
                    sobols_total_dict[param_name] = sobols_total_narr[i]

                results['sobols_first'][k] = sobols_first_dict
                results['sobols_second'][k] = sobols_second_dict
                results['sobols_total'][k] = sobols_total_dict

            else:  # use PCE coefficients

                # Statistical moments
                mean = fc[0]
                var = np.sum(fc[1:]**2, axis=0)
                std = np.sqrt(var)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobol, sobol_idx, _ = sobols(P, fc)
                varied = [_ for _ in self.sampler.vary.get_keys()]
                S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                #S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
                #for v in varied: del S2[v][v]
                S2 = {
                    _: np.zeros((len(varied), sobol.shape[-1]))
                    for _ in varied
                }
                for n, si in enumerate(sobol_idx):
                    if len(si) == 1:
                        v = varied[si[0]]
                        S1[v] = sobol[n]
                    elif len(si) == 2:
                        v1 = varied[si[0]]
                        v2 = varied[si[1]]
                        #S2[v1][v2] = sobol[n]
                        #S2[v2][v1] = sobol[n]
                        S2[v1][si[1]] = sobol[n]
                        S2[v2][si[0]] = sobol[n]
                    for i in si:
                        ST[varied[i]] += sobol[n]

                results['sobols_first'][k] = S1
                results['sobols_second'][k] = S2
                results['sobols_total'][k] = ST

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #16
    Sensitivities = np.column_stack((S_mc, s**2))
    row_labels = ['S_' + str(idx) for idx in range(1, Nrv + 1)]
    print("First Order Indices")
    print(pd.DataFrame(Sensitivities, columns=['Smc', 'Sa'], index=row_labels).round(3))
    # end Monte Carlo

    # Polychaos computations
    Ns_pc = 80
    samples_pc = jpdf.sample(Ns_pc)
    polynomial_order = 4
    poly = cp.orth_ttr(polynomial_order, jpdf)
    Y_pc = linear_model(w, samples_pc.T)
    approx = cp.fit_regression(poly, samples_pc, Y_pc, rule="T")

    exp_pc = cp.E(approx, jpdf)
    std_pc = cp.Std(approx, jpdf)
    print("Statistics polynomial chaos\n")
    print('\n        E(Y)  |  std(Y) \n')
    print('pc  : {:2.5f} | {:2.5f}'.format(float(exp_pc), float(std_pc)))
    
    
    S_pc = cp.Sens_m(approx, jpdf)

    Sensitivities = np.column_stack((S_mc, S_pc, s**2))
    print("\nFirst Order Indices")
    print(pd.DataFrame(Sensitivities, columns=['Smc', 'Spc', 'Sa'], index=row_labels).round(3))

#     print("\nRelative errors")
#     rel_errors=np.column_stack(((S_mc - s**2)/s**2,(S_pc - s**2)/s**2))
#     print(pd.DataFrame(rel_errors,columns=['Error Smc','Error Spc'],index=row_labels).round(3))
Example #17
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get the Polynomial
        P = self.sampler.P

        # Get the PCE variant to use (regression or projection)
        regression = self.sampler.regression

        # Compute nodes (and weights)
        if regression:
            nodes = cp.generate_samples(order=self.sampler.n_samples,
                                        domain=self.sampler.distribution,
                                        rule=self.sampler.rule)
        else:
            nodes, weights = cp.generate_quadrature(
                order=self.sampler.quad_order,
                dist=self.sampler.distribution,
                rule=self.sampler.rule,
                sparse=self.sampler.quad_sparse,
                growth=self.sampler.quad_growth)

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame.run_id.unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame['run_id'] == run_id][k]
                samples[k].append(data.values)

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                if samples[k][0].dtype == object:
                    for i in range(self.sampler.count):
                        samples[k][i] = samples[k][i].astype("float64")
                fit = cp.fit_regression(P, nodes, samples[k], "T")
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles (Pxx)
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            ipar = 0
            i = 0
            for param_name in self.sampler.vary.get_keys():
                j = self.sampler.params_size[ipar]
                sobols_first_dict[param_name] = sobols_first_narr[i:i + j]
                sobols_second_dict[param_name] = sobols_second_narr[i:i + j]
                sobols_total_dict[param_name] = sobols_total_narr[i:i + j]
                i += j
                ipar += 1
            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return results
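The returned dictionary is indexed per quantity of interest and, for the Sobol entries, per parameter; a usage sketch with illustrative names:

results = analysis.analyse(data_frame=df)
mean = results['statistical_moments']['my_qoi']['mean']
s1 = results['sobols_first']['my_qoi']['my_param']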
Example #18
    for j in range(0, SimLength):
        dist = cp.Normal(np.mean(WindSpeed_all[:, j]), np.std(WindSpeed_all[:, j]))
        orthPoly = cp.orth_ttr(polyOrder, dist)
        dataPoints_initial = dist.sample(NoOfSamples[i], rule='L')
        dataPoints = np.zeros(NoOfSamples[i])
        samples_u = np.zeros(NoOfSamples[i])
        u = Gamma_all[:, j]
        ws = WindSpeed_all[:, j]
        for k in range(0, NoOfSamples[i]):
            idx = (np.abs(ws - dataPoints_initial[k])).argmin()
            dataPoints[k] = ws[idx]
            samples_u[k] = u[idx]
        approx = cp.fit_regression(orthPoly, dataPoints, samples_u)
        GammaMeanPCE[i, j] = cp.E(approx, dist)
        GammaStdPCE[i, j] = cp.Std(approx, dist)
        print(i, j)

#%%
timestr = time.strftime("%Y%m%d")

f = open(timestr+'_GammaMean.pckl', 'wb')
pickle.dump(GammaMean, f)
f.close()

f = open(timestr+'_GammaStd.pckl', 'wb')
pickle.dump(GammaStd, f)
f.close()

f = open(timestr+'_SeedNo.pckl', 'wb')
pickle.dump(SeedNo, f)
Example #19
                                                    errorOperator2,
                                                    10**-10,
                                                    do_plot=False)
nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights()
print("Number of points:", len(nodes))
print("Sum of weights:", sum(weights))
weights = np.asarray(weights) * 1.0 / sum(weights)
nodes_transpose = list(zip(*nodes))

#################################################################################################
# propagate the uncertainty
value_of_interests = [model(node) for node in nodes]
value_of_interests = np.asarray(value_of_interests)
print("Mean", np.inner(weights, value_of_interests))
#################################################################################################
# generate orthogonal polynomials for the distribution
OP = cp.orth_ttr(3, dist)

#################################################################################################
# generate the general polynomial chaos expansion polynomial
gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests)

#################################################################################################
# calculate statistics
E = cp.E(gPCE, dist)
StdDev = cp.Std(gPCE, dist)

# print the statistics
print("mean: %f" % E)
print("stddev: %f" % StdDev)
Example #20
def plot_figures():
    """Plot figures for tutorial."""
    numpy.random.seed(1000)

    def foo(coord, param):
        return param[0] * numpy.e**(-param[1] * coord)

    coord = numpy.linspace(0, 10, 200)
    distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

    samples = distribution.sample(50)
    evals = numpy.array([foo(coord, sample) for sample in samples.T])

    plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.savefig("demonstration.png")
    plt.clf()

    samples = distribution.sample(1000, "H")
    evals = [foo(coord, sample) for sample in samples.T]
    expected = numpy.mean(evals, 0)
    deviation = numpy.std(evals, 0)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using Monte Carlo simulation")
    plt.savefig("results_montecarlo.png")
    plt.clf()

    polynomial_expansion = cp.orth_ttr(8, distribution)
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using point collocation method")
    plt.savefig("results_collocation.png")
    plt.clf()

    abscissas, weights = cp.generate_quadrature(8, distribution, "C")
    evals = [foo(coord, val) for val in abscissas.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, abscissas, weights,
                                   evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using psuedo-spectral projection method")
    plt.savefig("results_spectral.png")
    plt.clf()
Example #21
def gpc(dists, distsMeta, wallModel, order, hdf5group, sampleScheme='M'):
    print "\n GeneralizedPolynomialChaos - order {}\n".format(order)

    dim = len(dists)

    expansionOrder = order
    numberOfSamples = 4 * cp.terms(expansionOrder, dim)

    # Sample in independent space
    samples = dists.sample(numberOfSamples, sampleScheme).transpose()
    model = wallModel(distsMeta)

    # Evaluate the model in parallel (the model itself is nonlinear)
    pool = multiprocessing.Pool()
    data = pool.map(model, samples)
    pool.close()
    pool.join()
    C_data = [retval[0] for retval in data]
    a_data = [retval[1] for retval in data]

    C_data = np.array(C_data)
    a_data = np.array(a_data)
    # Orthogonal C_polynomial from marginals
    orthoPoly = cp.orth_ttr(expansionOrder, dists)

    for data, outputName in zip([C_data, a_data], ['Compliance', 'Area']):

        # Fit the model together in independent space
        C_polynomial = cp.fit_regression(orthoPoly, samples.transpose(), data)

        # save data to dictionary
        plotMeanConfidenceAlpha = 5

        C_mean = cp.E(C_polynomial, dists)
        C_std = cp.Std(C_polynomial, dists)

        Si = cp.Sens_m(C_polynomial, dists)
        STi = cp.Sens_t(C_polynomial, dists)

        C_conf = cp.Perc(
            C_polynomial,
            [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
            dists)

        a = np.linspace(0, 100, 1000)
        da = a[1] - a[0]
        C_cdf = cp.Perc(C_polynomial, a, dists)

        C_pdf = da / (C_cdf[1::] - C_cdf[0:-1])
        # Resample to generate full histogram
        samples2 = dists.sample(numberOfSamples * 100, sampleScheme)
        C_data2 = C_polynomial(*samples2).transpose()

        # save in hdf5 file
        solutionDataGroup = hdf5group.create_group(outputName)

        solutionData = {
            'mean': C_mean,
            'std': C_std,
            'confInt': C_conf,
            'Si': Si,
            'STi': STi,
            'cDataGPC': C_data,
            'samplesGPC': samples,
            'cData': C_data2,
            'samples': samples2.transpose(),
            'C_pdf': C_pdf
        }

        for variableName, variableValue in solutionData.items():
            solutionDataGroup.create_dataset(variableName, data=variableValue)
Example #22
	#Net.Control_Input('Trials/StochasticDemandsPWGInput.csv')
	Net.Control_Input(Directory+'Demands.csv')
	Net.MOC_Run(10)
	Output[:,i] = Net.nodes[1].TranH
	

#Net.MOC_Run(86350)
#Net.geom_Plot(plot_Node_Names = True)
#Net.transient_Node_Plot(['6','10','13','16','21','24','31'])

#Net.transient_Node_Plot(['1','2','3','4','5','6'])

#for Node in Net.nodes:
#	np.save(Directory +'MeasureData'+str(Node.Name)+'.npy',Node.TranH)
	
#for Node in Net.nodes:
#	pp.scatter([int(Node.Name)], [np.mean(Node.TranH)])
#PE = np.zeros(9999)#999)
#KE = np.zeros(9999)
#for pipe in Net.pipes:
#	PE += np.array(pipe.PE)
#	KE += np.array(pipe.KE)
polynomial_expansion = cp.orth_ttr(1, distribution)
foo_approx = cp.fit_regression(polynomial_expansion, samples[:10], Output[:,:10].T)

expected = cp.E(foo_approx, distribution)
deviation = cp.Std(foo_approx, distribution)


x, y = normal_dist(expected[-1], deviation[-1]**2)
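normal_dist is not defined in this fragment; a plausible stand-in (a hypothetical helper, assuming numpy as np) that returns a Gaussian pdf curve for a given mean and variance:

def normal_dist(mean, variance):  # hypothetical helper, not from the original source
    x = np.linspace(mean - 4*np.sqrt(variance), mean + 4*np.sqrt(variance), 200)
    y = np.exp(-(x - mean)**2 / (2*variance)) / np.sqrt(2*np.pi*variance)
    return x, y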
Example #23
uq = profit.UQ(yaml='uq.yaml')
distribution = cp.J(*uq.params.values())
sparse = uq.backend.sparse
if sparse:
    order = 2 * 3
else:
    order = 3 + 1

# actually start the postprocessing now:

nodes, weights = cp.generate_quadrature(order,
                                        distribution,
                                        rule='G',
                                        sparse=sparse)
expansion, norms = cp.orth_ttr(3, distribution, retall=True)
approx_denit = cp.fit_quadrature(expansion, nodes, weights,
                                 np.mean(data[:, 1, :], axis=1))
approx_oxy = cp.fit_quadrature(expansion, nodes, weights,
                               np.mean(data[:, 0, :], axis=1))

annual_oxy = cp.fit_quadrature(expansion, nodes, weights, data[:, 0, :])
annual_denit = cp.fit_quadrature(expansion, nodes, weights, data[:, 1, :])

s_denit = cp.descriptives.sensitivity.Sens_m(annual_denit, distribution)
s_oxy = cp.descriptives.sensitivity.Sens_m(annual_oxy, distribution)

df_oxy = cp.Std(annual_oxy, distribution)
df_denit = cp.Std(annual_denit, distribution)
f0_oxy = cp.E(annual_oxy, distribution)
f0_denit = cp.E(annual_denit, distribution)
Example #24
Alpha = cp.Normal(1, 0.1)
Beta = cp.Normal(0.1, 0.01)
F = cp.Normal(0.02, 0.001)
K = cp.Uniform(0.01, 0.05)
Distributions = cp.J(Alpha, Beta, F, K)


maxT = 60.0
dT = 0.002
time = np.arange(0, maxT + dT, dT)

Order = 3
NoSamples = 40
Output = TurbData[:NoSamples, :, -1]
polynomial_expansion = cp.orth_ttr(Order, Distributions)
foo_approx = cp.fit_regression(polynomial_expansion, Samples[:, :NoSamples], Output[:, -1])
expected = cp.E(foo_approx, Distributions)
deviation = cp.Std(foo_approx, Distributions)
COV = cp.Cov(foo_approx, Distributions)
# Perc = cp.Perc(foo_approx, [5, 95], Distributions)

f, axs = pp.subplots(figsize=(9, 6), nrows=1, ncols=1, sharex=True)
axs.plot(time[1:], expected, 'k')
axs.fill_between(time[1:], expected + deviation, expected - deviation, color='k', alpha=0.25)
# axs.fill_between(time[1:], Perc[0], Perc[1], color='k', alpha=0.25)

pp.show()