Example #1
def test_integration():
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    gq = cp.generate_quadrature
    nodes, weights = gq(order, dist, rule="C")
    vals = np.zeros((len(weights), size))
    cp.fit_quadrature(orth, nodes, weights, vals, norms=norms)
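
This test assumes dim, order and size are defined elsewhere in the test module. A self-contained sketch of the same call sequence, with those three values picked arbitrarily for illustration:

import numpy as np
import chaospy as cp

def test_integration_standalone():
    dim, order, size = 2, 3, 5  # assumed values; the original test defines these elsewhere
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=True)
    nodes, weights = cp.generate_quadrature(order, dist, rule="C")
    vals = np.zeros((len(weights), size))  # one row of model output per quadrature node
    cp.fit_quadrature(orth, nodes, weights, vals, norms=norms)
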
Example #2
 def getMetaModels(self):
     '''
     This function generates meta models using pre-run samples  
     '''
     [leakCVol, leakBVol] = pkl.load(open('bigmat.dump', 'rb'))
     #this is the total number of leaky wells in each layer
     self.nlw = int(0.5 * leakCVol.shape[0])
     #
     #Note: the sample matrix needs to be nsamples * N_leakywells
     #
     self.co2Model = cp.fit_quadrature(self.polynomials, self.nodes,
                                       self.weights, leakCVol.T)
     self.brineModel = cp.fit_quadrature(self.polynomials, self.nodes,
                                         self.weights, leakBVol.T)
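
The transposes above matter: cp.fit_quadrature expects the model evaluations with one row per quadrature node, so an outputs-by-samples matrix such as leakCVol has to be passed as leakCVol.T. A minimal shape sketch, with a stand-in joint distribution and well count (both assumptions, not taken from the original class):

import numpy as np
import chaospy as cp

dist = cp.J(*[cp.Uniform(0, 1) for _ in range(3)])       # stand-in for the class's joint distribution
nodes, weights = cp.generate_quadrature(2, dist, rule="G")
polynomials = cp.orth_ttr(2, dist)
n_wells = 4                                               # assumed number of leaky wells
leak = np.random.rand(n_wells, len(weights))              # outputs x samples, like leakCVol
model = cp.fit_quadrature(polynomials, nodes, weights, leak.T)  # samples x outputs
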
Example #3
    def postProcess(self, Tmax=20, reLoad=True):
        #load the results files
        Tmax = Tmax * 365.25
        leakCVol = None
        if reLoad:
            print('loading results from {0} files...'.format(self.nsamples))
            for runnum in range(self.nsamples):
                print(runnum)
                [monP, monh, cumMassCO2, cumMassBrine
                 ] = pkl.load(open('time%s_%s.dump' % (Tmax, runnum), 'rb'))
                if (leakCVol is None):
                    leakCVol = np.zeros((cumMassCO2.shape[0], self.nsamples))
                    leakBVol = np.zeros((cumMassBrine.shape[0], self.nsamples))
                leakCVol[:, runnum] = cumMassCO2
                leakBVol[:, runnum] = cumMassBrine
            print('finished loading results')
            pkl.dump([leakCVol, leakBVol], open('bigmat.dump', 'wb'))
        else:
            [leakCVol, leakBVol] = pkl.load(open('bigmat.dump', 'rb'))

        layer = 2
        nlw = int(0.5 * leakCVol.shape[0])
        ilw = (layer - 1) * nlw + 0
        leak = leakCVol[ilw, :]

        #Note: the sample matrix needs to be nsamples * N_predictedpoints
        models = cp.fit_quadrature(self.polynomials, self.nodes, self.weights,
                                   leakCVol.T)
        #simple test
        asample = self.jointDist.sample(1, 'H')
        print('leaked co2=', models(asample[0], asample[1], asample[2],
                                     asample[3], asample[4], asample[5]))
Example #4
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    t1 = cp.Uniform(0, 150)
    t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  #No good for dependent
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if 1:
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
Example #5
    def fit(self, high_fidelity, num_evals=None, quadrature_rule='gaussian'):
        """Fits the low-fidelity surrogate via Polynomial Chaos Expansion.

        Parameters
        ----------
        high_fidelity : HighFidelityModel
            Model that we want to approximate with the low-fidelity surrogate.
        num_evals : int, default None
            Parameter provided for consistency, the actual number of evaluations is determined by the
            quadrature rule.
        quadrature_rule : str, default 'gaussian'
            Rule used for the quadrature (passed to chaospy.generate_quadrature).
        """
        abscissae, weights = cpy.generate_quadrature(self.degree,
                                                     self.prior,
                                                     rule=quadrature_rule)
        self.expansion = generate_expansion(self.degree,
                                            self.prior,
                                            retall=False)
        widgets = [
            'fit\t',
            pb.Percentage(), ' ',
            pb.Bar('='), ' ',
            pb.AdaptiveETA(), ' - ',
            pb.Timer()
        ]
        bar = pb.ProgressBar(maxval=abscissae.T.shape[0], widgets=widgets)
        evals = []
        bar.start()
        for i, z_ in enumerate(abscissae.T):
            evals.append(high_fidelity.eval(z_))
            bar.update(i + 1)
        self.proxy = cpy.fit_quadrature(self.expansion, abscissae, weights,
                                        evals)
        self._fit = True
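
Stripped of the progress bar and class state, the projection above reduces to a few chaospy calls. A standalone sketch with a placeholder forward model and prior (both assumptions, not part of the original class):

import numpy as np
import chaospy as cpy
from chaospy import generate_expansion

degree = 2
prior = cpy.Iid(cpy.Normal(0.0, 1.0), 2)                      # placeholder prior
abscissae, weights = cpy.generate_quadrature(degree, prior, rule='gaussian')
expansion = generate_expansion(degree, prior, retall=False)
evals = [np.exp(-np.sum(z_**2)) for z_ in abscissae.T]        # placeholder for high_fidelity.eval(z_)
proxy = cpy.fit_quadrature(expansion, abscissae, weights, evals)
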
Example #6
def fit():
    nodes, weights = generate_quadrature(4,
                                         distribution,
                                         rule='G',
                                         sparse=False)
    print(np.max(nodes - indata.T))
    expansion = orth_ttr(3, distribution)
    return fit_quadrature(expansion, nodes, weights, outdata)
Example #7
 def calculate_coefficients(self):
     evaluations = self.function(self.quad_points.T)
     self.f_approx, self.coefficients = cp.fit_quadrature(
         self.polynomial_expansion,
         self.quad_points,
         self.quad_weights,
         evaluations,
         retall=True)
Example #8
def SRPCostRS(p, sim, pdf):

    polynomials = cp.orth_ttr(order=2, dist=pdf)
    samples, weights = cp.generate_quadrature(order=2,
                                              domain=pdf,
                                              rule="Gaussian")
    stateTensor = [SRPCost(p, sim, s) for s in samples.T]
    # stateTensor = pool.map(OptCost,samples.T)
    PCE = cp.fit_quadrature(polynomials, samples, weights, stateTensor)

    # print "PCE Expectation: {} ".format(cp.E(poly=PCE,dist=pdf))
    return cp.E(poly=PCE, dist=pdf)
Example #9
    def solve_nonlinear(self, params, unknowns, resids):

        power = params['power']
        method_dict = params['method_dict']
        dist = method_dict['distribution']
        rule = method_dict['rule']
        n = len(power)
        if rule != 'rectangle':
            points, weights = cp.generate_quadrature(order=n - 1,
                                                     domain=dist,
                                                     rule=rule)
        # else:
        #     points, weights = quadrature_rules.rectangle(n, method_dict['distribution'])

        poly = cp.orth_chol(n - 1, dist)
        # poly = cp.orth_bert(n-1, dist)
        # double check this is giving me good orthogonal polynomials.
        # print poly, '\n'
        p2 = cp.outer(poly, poly)
        # print 'chol', cp.E(p2, dist)
        norms = np.diagonal(cp.E(p2, dist))
        print('diag', norms)

        expansion, coeff = cp.fit_quadrature(poly,
                                             points,
                                             weights,
                                             power,
                                             retall=True,
                                             norms=norms)
        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)

        mean = cp.E(expansion, dist)
        print('mean cp.E =', mean)
        # mean = sum(power*weights)
        print('mean sum =', sum(power * weights))
        print('mean coeff =', coeff[0])
        std = cp.Std(expansion, dist)

        print(mean)
        print(std)
        print(np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:])))
        # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns['mean'] = mean * hours
        unknowns['std'] = std * hours

        print('In ChaospyStatistics')
Example #10
def evaluate_postprocessing(distribution, data, expansion):
    import matplotlib.pyplot as plt
    from profit import read_input
    from chaospy import generate_quadrature, orth_ttr, fit_quadrature, E, Std, descriptives

    nodes, weights = generate_quadrature(uq.backend.order + 1,
                                         distribution,
                                         rule='G')
    expansion = orth_ttr(uq.backend.order, distribution)
    approx = fit_quadrature(expansion, nodes, weights,
                            np.mean(data[:, 0, :], axis=1))
    urange = list(uq.params.values())[0].range()
    vrange = list(uq.params.values())[1].range()
    u = np.linspace(urange[0], urange[1], 100)
    v = np.linspace(vrange[0], vrange[1], 100)
    U, V = np.meshgrid(u, v)
    c = approx(U, V)

    # for 3 parameters:
    #wrange = list(uq.params.values())[2].range()
    #w = np.linspace(wrange[0], wrange[1], 100)
    #W = 0.03*np.ones(U.shape)
    #c = approx(U,V,W)

    plt.figure()
    plt.contour(U, V, c, 20)
    plt.colorbar()
    plt.scatter(config.eval_points[0, :],
                config.eval_points[1, :],
                c=np.mean(data[:, 0, :], axis=1))

    plt.show()

    F0 = E(approx, distribution)
    dF = Std(approx, distribution)
    sobol1 = descriptives.sensitivity.Sens_m(approx, distribution)
    sobolt = descriptives.sensitivity.Sens_t(approx, distribution)
    sobol2 = descriptives.sensitivity.Sens_m2(approx, distribution)

    print('F = {} +- {}%'.format(F0, 100 * abs(dF / F0)))
    print('1st order sensitivity indices:\n {}'.format(sobol1))
    print('Total order sensitivity indices:\n {}'.format(sobolt))
    print('2nd order sensitivity indices:\n {}'.format(sobol2))
Example #11
def calculate_sobol_indices(quad_deg_1D, poly_deg_1D, joint_distr, sparse_bool,
                            title_names):
    nodes, weights = cp.generate_quadrature(quad_deg_1D,
                                            joint_distr,
                                            rule='G',
                                            sparse=sparse_bool)
    c, k, f, y0, y1 = nodes

    poly = cp.orth_ttr(poly_deg_1D, joint_distr, normed=True)

    y_out = [
        discretize_oscillator_odeint(model, atol, rtol, (y0_, y1_),
                                     (c_, k_, f_, w), t)[-1]
        for c_, k_, f_, y0_, y1_ in zip(c, k, f, y0, y1)
    ]

    # find generalized Polynomial chaos and expansion coefficients
    gPC_m, expansion_coeff = cp.fit_quadrature(poly,
                                               nodes,
                                               weights,
                                               y_out,
                                               retall=True)
    #print(f'The best polynomial of degree {poly_deg_1D} that approximates f(x): {cp.around(gPC_m, 1)}')
    # gPC_m is the polynomial that approximates the most
    print(
        f'Expansion coeff [0] (mean) for poly {poly_deg_1D} = {expansion_coeff[0]}'
    )  # , expect_weights: {expect_y}')
    #mu = cp.E(gPC_m, joint_distr)
    #print(f'Mean value from gPCE: {mu}')

    # Sobol indices
    first_order_Sobol_ind = cp.Sens_m(gPC_m, joint_distr)
    total_Sobol_ind = cp.Sens_t(gPC_m, joint_distr)

    print("The number of quadrature nodes for the grid is", len(nodes.T))
    print(f'The first order Sobol indices are \n {first_order_Sobol_ind}')
    print(f"The total Sobol' indices are \n {total_Sobol_ind}")

    plot_sobol_indices(first_order_Sobol_ind, title_names[0], False)
    plot_sobol_indices(total_Sobol_ind, title_names[1], False)

    return first_order_Sobol_ind, total_Sobol_ind
Example #12
    def solve_nonlinear(self, params, unknowns, resids):

        power = params["dirPowers"]
        method_dict = params["method_dict"]
        dist = method_dict["distribution"]
        n = len(power)
        points, weights = cp.generate_quadrature(order=n - 1, domain=dist, rule="G")
        poly = cp.orth_ttr(
            n - 1, dist
        )  # Think about the n-1 for 1D; for 2D or more it would be n-2. See the Dakota reference manual on quadrature order.
        # Double check if giving me orthogonal polynomials
        # p2 = cp.outer(poly, poly)
        # norms = np.diagonal(cp.E(p2, dist))
        # print 'diag', norms

        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True, norms=norms)
        expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)
        # expansion, coeff = cp.fit_regression(poly, points, power, retall=True)

        mean = cp.E(expansion, dist, rule="G")
        # print 'mean cp.E =', mean
        # # mean = sum(power*weights)
        # print 'mean sum =', sum(power*weights)
        # print 'mean coeff =', coeff[0]*8760/1e6
        std = cp.Std(expansion, dist, rule="G")

        # print mean
        # print std
        # print np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns["mean"] = mean * hours
        unknowns["std"] = std * hours

        # Modify the statistics to account for the truncation of the weibull (speed) case.
        modify_statistics(params, unknowns)  # It doesn't do anything for the direction case.

        print "In ChaospyStatistics"
Example #13
plt.figure()
plt.plot(indata['alfa'], indata['Rf'], 'x')
plt.xlabel('alpha')
plt.ylabel('Rf')

#%%

distribution = J(*uq.params.values())
nodes, weights = generate_quadrature(uq.backend.order + 1,
                                     distribution,
                                     rule='G',
                                     sparse=True)
expansion = orth_ttr(uq.backend.order, distribution)
#%%
approx0 = fit_quadrature(expansion, nodes, weights, outdata[:, 0])
approxt = fit_quadrature(expansion, nodes, weights, outdata[:, 1:])
#%%

F0 = E(approx0, distribution)
dF = Std(approx0, distribution)
sobol1 = descriptives.sensitivity.Sens_m(approx0, distribution)
#sobolt = descriptives.sensitivity.Sens_t(approx0, distribution)
#sobol2 = descriptives.sensitivity.Sens_m2(approx0, distribution)

print('F = {} +- {}%'.format(F0, 100 * abs(dF / F0)))
print('1st order sensitivity indices:\n {}'.format(sobol1))
#print('Total order sensitivity indices:\n {}'.format(sobolt))
#print('2nd order sensitivity indices:\n {}'.format(sobol2))

#%%
Example #14
    #################### full grid computations ####################
    # get the full tensor-grid quadrature nodes and weights
    nodes_full, weights_full = cp.generate_quadrature(quad_deg_1D,
                                                      distr_5D,
                                                      rule='G',
                                                      sparse=False)
    # create vector to save the solution
    sol_odeint_full = np.zeros(len(nodes_full.T))

    # perform full-grid pseudo-spectral approximation
    for j, n in enumerate(nodes_full.T):
        # each n is a vector with 5 components
        # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
        init_cond = n[3], n[4]
        args = n[0], n[1], n[2], w
        sol_odeint_full[j] = discretize_oscillator_odeint(
            model, atol, rtol, init_cond, args, t, t_interest)

    # obtain the gpc approximation
    sol_gpc_full_approx = cp.fit_quadrature(P, nodes_full, weights_full,
                                            sol_odeint_full)

    # compute statistics
    mean_full = cp.E(sol_gpc_full_approx, distr_5D)
    var_full = cp.Var(sol_gpc_full_approx, distr_5D)
    ##################################################################

    #################### sparse grid computations ####################
    # get the sparse quadrature nodes and weight
    nodes_sparse, weights_sparse = cp.generate_quadrature(quad_deg_1D,
                                                          distr_5D,
                                                          rule='G',
                                                          sparse=True)
    # create vector to save the solution
    sol_odeint_sparse = np.zeros(len(nodes_sparse.T))
Example #15
                                                    errorOperator2,
                                                    10**-10,
                                                    do_plot=False)
nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights()
print("Number of points:", len(nodes))
print("Sum of weights:", sum(weights))
weights = np.asarray(weights) * 1.0 / sum(weights)
nodes_transpose = list(zip(*nodes))

#################################################################################################
# propagate the uncertainty
value_of_interests = [model(node) for node in nodes]
value_of_interests = np.asarray(value_of_interests)
print("Mean", np.inner(weights, value_of_interests))
#################################################################################################
# generate orthogonal polynomials for the distribution
OP = cp.orth_ttr(3, dist)

#################################################################################################
# generate the general polynomial chaos expansion polynomial
gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests)

#################################################################################################
# calculate statistics
E = cp.E(gPCE, dist)
StdDev = cp.Std(gPCE, dist)

# print the statistics
print("mean: %f" % E)
print("stddev: %f" % StdDev)
Example #16
s = sqrt(log(std**2 / mean**2 + 1))
mu = log(mean) - 0.5 * s**2

params = []
for k in range(len(mu)):
    params.append(Normal(mu=mu[k], sigma=s[k]))

dist = J(*params)
#%%

nodes, weights = generate_quadrature(4, dist, rule='G', sparse=True)
expansion = orth_ttr(3, dist)

#%%
approx = fit_quadrature(expansion, nodes, weights, outdata)

#%%

F0 = E(approx, dist)
dF = Std(approx, dist)

#%%
plt.figure(figsize=(6, 3))
plt.plot(F0, 'k')
plt.fill_between(range(len(F0)), F0 - 1.96 * dF, F0 + 1.96 * dF,
                 alpha=0.2)  # 95% CI
plt.fill_between(range(len(F0)), F0 - 0.67 * dF, F0 + 0.67 * dF,
                 alpha=0.5)  # 50% CI
plt.grid(True)
plt.xlim(1, 80)
Example #17
            # # legend()
            # # yscale('log')
            # import matplotlib.pyplot as plt
            # from mpl_toolkits.mplot3d import Axes3D
            # style.use('seaborn-paper')
            # title('Trayectorial dirigible')
            # figure()
            # plot(xs/1000,ys/1000,'-k')
            # xlabel(r'$x$ (km)')
            # ylabel(r'$y$ (km)')
            # results2 ={'nodes':nodes, 'weights':weights, 'pos':r_[xs,ys,zs], 'vel':r_[us,vs,ws], 'vel2':r_[p,q,r], 'angles':r_[phi, theta, xi], 'results' : result, 'fwind':fwind, 't' : dt*arange(nt)}
            # savez_compressed('Rdet_{}'.format(int(nodoN)), results = results2)
            nodoN += 1
        except Exception as esx:
            print(esx)
    foo_approx = cp.fit_quadrature(P, nodes, weights, point_results)
    mC = cp.E(foo_approx, dist)

    # u = load('Rsol_last.npz', allow_pickle=True)['results'].item()['results']['x']
    # T = u[:npoints]
    # deltat = u[npoints:]
    # # deltae = u[2*npoints:]
    # t_c = arange(nt)*dt
    # t = linspace(0,(nt-1)*dt, npoints)
    # tt = linspace(0,(nt-1)*dt, len(u)-int(len(u)/3))
    # tcontrol = interpolate.interp1d(t, T*10000, kind = 'cubic', fill_value='extrapolate')
    # deltatcontrol = interpolate.interp1d(tt, deltat, kind = 'cubic', fill_value='extrapolate')

    # tiempo = dt*arange(nt)
    # thrust = tcontrol(tiempo)
    # plot(tiempo, thrust, '-k', label='Estocástico')
Example #18
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : pandas DataFrame
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the sobol indices and other information.
        """
        def sobols(P, coefficients):
            """ Utility routine to calculate sobols based on coefficients
            """
            A = np.array(P.coefficients) != 0
            multi_indices = np.array(
                [P.exponents[A[:, i]].sum(axis=0) for i in range(A.shape[1])])
            sobol_mask = multi_indices != 0
            _, index = np.unique(sobol_mask, axis=0, return_index=True)
            index = np.sort(index)
            sobol_idx_bool = sobol_mask[index]
            sobol_idx_bool = np.delete(sobol_idx_bool, [0], axis=0)
            n_sobol_available = sobol_idx_bool.shape[0]
            if len(coefficients.shape) == 1:
                n_out = 1
            else:
                n_out = coefficients.shape[1]
            n_coeffs = coefficients.shape[0]
            sobol_poly_idx = np.zeros([n_coeffs, n_sobol_available])
            for i_sobol in range(n_sobol_available):
                sobol_poly_idx[:, i_sobol] = np.all(
                    sobol_mask == sobol_idx_bool[i_sobol], axis=1)
            sobol = np.zeros([n_sobol_available, n_out])
            for i_sobol in range(n_sobol_available):
                sobol[i_sobol] = np.sum(np.square(
                    coefficients[sobol_poly_idx[:, i_sobol] == 1]),
                                        axis=0)
            idx_sort_descend_1st = np.argsort(sobol[:, 0], axis=0)[::-1]
            sobol = sobol[idx_sort_descend_1st, :]
            sobol_idx_bool = sobol_idx_bool[idx_sort_descend_1st]
            sobol_idx = [0 for _ in range(sobol_idx_bool.shape[0])]
            for i_sobol in range(sobol_idx_bool.shape[0]):
                sobol_idx[i_sobol] = np.array(
                    [i for i, x in enumerate(sobol_idx_bool[i_sobol, :]) if x])
            var = ((coefficients[1:]**2).sum(axis=0))
            sobol = sobol / var
            return sobol, sobol_idx, sobol_idx_bool

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
            'fit': {},
            'Fourier_coefficients': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        #        samples = {k: [] for k in qoi_cols}
        #        for run_id in data_frame[('run_id', 0)].unique():
        #            for k in qoi_cols:
        #                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
        #                samples[k].append(data.values.flatten())

        samples = {k: [] for k in qoi_cols}
        for k in qoi_cols:
            samples[k] = data_frame[k].values

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit, fc = cp.fit_regression(P, nodes, samples[k], retall=1)
            else:
                fit, fc = cp.fit_quadrature(P,
                                            nodes,
                                            weights,
                                            samples[k],
                                            retall=1)
            results['fit'][k] = fit
            results['Fourier_coefficients'][k] = fc

            # Percentiles: 1%, 10%, 50%, 90% and 99%
            P01, P10, P50, P90, P99 = cp.Perc(
                fit, [1, 10, 50, 90, 99], self.sampler.distribution).squeeze()
            results['percentiles'][k] = {
                'p01': P01,
                'p10': P10,
                'p50': P50,
                'p90': P90,
                'p99': P99
            }

            if self.sampling:  # use chaospy's sampling method

                # Statistical moments
                mean = cp.E(fit, self.sampler.distribution)
                var = cp.Var(fit, self.sampler.distribution)
                std = cp.Std(fit, self.sampler.distribution)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
                sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
                sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
                sobols_first_dict = {}
                sobols_second_dict = {}
                sobols_total_dict = {}
                for i, param_name in enumerate(self.sampler.vary.vary_dict):
                    sobols_first_dict[param_name] = sobols_first_narr[i]
                    sobols_second_dict[param_name] = sobols_second_narr[i]
                    sobols_total_dict[param_name] = sobols_total_narr[i]

                results['sobols_first'][k] = sobols_first_dict
                results['sobols_second'][k] = sobols_second_dict
                results['sobols_total'][k] = sobols_total_dict

            else:  # use PCE coefficients

                # Statistical moments
                mean = fc[0]
                var = np.sum(fc[1:]**2, axis=0)
                std = np.sqrt(var)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobol, sobol_idx, _ = sobols(P, fc)
                varied = [_ for _ in self.sampler.vary.get_keys()]
                S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                #S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
                #for v in varied: del S2[v][v]
                S2 = {
                    _: np.zeros((len(varied), sobol.shape[-1]))
                    for _ in varied
                }
                for n, si in enumerate(sobol_idx):
                    if len(si) == 1:
                        v = varied[si[0]]
                        S1[v] = sobol[n]
                    elif len(si) == 2:
                        v1 = varied[si[0]]
                        v2 = varied[si[1]]
                        #S2[v1][v2] = sobol[n]
                        #S2[v2][v1] = sobol[n]
                        S2[v1][si[1]] = sobol[n]
                        S2[v2][si[0]] = sobol[n]
                    for i in si:
                        ST[varied[i]] += sobol[n]

                results['sobols_first'][k] = S1
                results['sobols_second'][k] = S2
                results['sobols_total'][k] = ST

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #19
a = cp.Uniform(0, 0.1)
I = cp.Uniform(8, 10)
dist = cp.J(a, I)

num_tests = 100
order = 4

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1,
                                        dist,
                                        rule="G",
                                        sparse=False)
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(x, s[0], s[1]) for s in test_inputs.T])
surrogate_test_outputs = numpy.array(
    [U_hat(s[0], s[1]) for s in test_inputs.T])

print "mean l2 error", numpy.mean(
    numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))

i = 10
plt.scatter(test_outputs[:, i], surrogate_test_outputs[:, i])
plt.show()

plt.plot(test_outputs[:, i], '--')
Example #20
# Create 3rd order quadrature scheme
nodes, weights = cp.generate_quadrature(
    order=3, domain=distribution, rule="Gaussian")

u0 = 0.3
# Evaluate model at the nodes
x = np.linspace(0, 1, 101)
samples = [model(x, u0, node[0], node[1], node[2])
           for node in nodes.T]

# Generate 3rd order orthogonal polynomial expansion
polynomials = cp.orth_ttr(order=3, dist=distribution)

# Create model approximation (surrogate solver)
model_approx = cp.fit_quadrature(
               polynomials, nodes, weights, samples)

# Model analysis
mean = cp.E(model_approx, distribution)
deviation = cp.Std(model_approx, distribution)

# Plot results
from matplotlib import pyplot as plt
plt.rc("figure", figsize=[8,6])
plt.fill_between(x, mean-deviation, mean+deviation, color="k",
        alpha=0.5)
plt.plot(x, mean, "k", lw=2)
plt.xlabel("depth $x$")
plt.ylabel("porosity $u$")
plt.legend(["mean $\pm$ deviation", "mean"])
plt.savefig("ode.pdf")
Example #21
 else: # Polynomial Chaos Expansion !!!!
     if args.type == 'qmc':
         # Quasi-Monte Carlo with 1/4 the number of MC samples, and a PCE built from it
         samples = pdf.sample(n,'S')
         stateTensor = pool.map(Simulate,samples.T)
         if not args.no_save:
             savemat(saveDir+'SobolMC',{'states':stateTensor, 'samples':samples})
         
         # data = loadmat('./data/SobolMC')
         # samples = data['samples']
         # stateTensor = data['states']
         polynomials = cp.orth_ttr(order=2, dist=pdf)
         PCE = cp.fit_regression(polynomials, samples, stateTensor)
     elif args.type == 'pce':
         #Quadrature based PCE
         polynomials = cp.orth_ttr(order=2, dist=pdf)
         samples,weights = cp.generate_quadrature(order=2, domain=pdf, rule="Gaussian")
         stateTensor = pool.map(Simulate,samples.T)
         PCE = cp.fit_quadrature(polynomials,samples,weights,stateTensor)
         
     data = loadmat(saveDir+'MC') #Load the MC samples for an apples-to-apples comparison
     pceTestPoints = data['samples']    
     stateTensorPCE = np.array([PCE(*point) for point in pceTestPoints.T])
     Expectation = cp.E(poly=PCE,dist=pdf)
     if not args.no_save:      
         savemat(saveDir+'PCE2',{'states':stateTensorPCE, 'samples':pceTestPoints,'mean':Expectation})
     # savemat('./data/PCE2',{'states':stateTensorPCE[:,:,:,0], 'samples':pceTestPoints,'mean':Expectation})
 
 
 
 
Example #22
u2 = cp.Uniform(0,1)
joint_distribution = cp.J(u1, u2)

# 2. generate orthogonal polynomials
polynomial_order = 3
poly = cp.orth_ttr(polynomial_order, joint_distribution)

# 4.1 generate quadrature nodes and weights
order = 5
nodes, weights = cp.generate_quadrature(order=order, domain=joint_distribution, rule='G')

# 4.2 evaluate the simple model for all nodes
model_evaluations = nodes[0]+nodes[1]*nodes[0]

# 4.3 use quadrature to generate the polynomial chaos expansion
gpce_quadrature = cp.fit_quadrature(poly, nodes, weights, model_evaluations)
# end example spectral projection
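
gpce_regression used below is built outside this excerpt. A sketch of how such a point-collocation (regression) expansion is typically obtained for the same simple model (the sample count is an assumption):

# example point collocation (sketch, not from the original excerpt)
samples = joint_distribution.sample(100)                    # assumed number of collocation points
regression_evaluations = samples[0] + samples[1]*samples[0]
gpce_regression = cp.fit_regression(poly, samples, regression_evaluations)
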

# example uq
exp_reg = cp.E(gpce_regression, joint_distribution)
exp_ps =  cp.E(gpce_quadrature, joint_distribution)

std_reg = cp.Std(gpce_regression, joint_distribution)
str_ps = cp.Std(gpce_quadrature, joint_distribution)

prediction_interval_reg = cp.Perc(gpce_regression, [5, 95], joint_distribution)
prediction_interval_ps = cp.Perc(gpce_quadrature, [5, 95], joint_distribution)

print("Expected values   Standard deviation            90 % Prediction intervals\n")
print(' E_reg |  E_ps     std_reg |  std_ps                pred_reg |  pred_ps')
print('  {} | {}       {:>6.3f} | {:>6.3f}       {} | {}'.format(exp_reg,
Example #23
    distr_unif_w = cp.Uniform(0.95, 1.05)
    orth_polies = []
    for i, n in enumerate(N):
        # generate Gaussian quadrature nodes and weights for the uniform distribution (we approximate the projection integrals with quadrature)
        nodes, weights = cp.generate_quadrature(K[i], distr_unif_w, rule = "G") #nodes [[1,2]]
        # NOTE: for k == 2 it generates 3 nodes

        # generate orthogonal polynomials for the distribution (for N == 1 this gives polynomials up to degree 1 => 2 polynomials)
        orth_poly = cp.orth_ttr(N[i], distr_unif_w, normed = True)

        # evaluate f(x) at all quadrature nodes and take y(10), i.e. the last entry [-1]
        y_out = [discretize_oscillator_odeint(model, init_cond, x_axis, (c, k, f, node), atol, rtol)[-1] for node in nodes[0]]

        # find generalized Polynomial chaos and expansion coefficients
        gPC_m, expansion_coeff = cp.fit_quadrature(orth_poly, nodes, weights, y_out, retall = True)
        #gPC_m = cp.fit_quadrature(orth_poly, nodes, weights, y_out)

        # gPC_m is the polynomial that approximates the most
        print(f'Expansion coeff chaospy: {expansion_coeff}')
        print(f'The best polynomial of degree {n} that approximates f(x): {cp.around(gPC_m, 1)}')
        print(f'Expansion coeff [0] = {expansion_coeff[0]}')#, expect_weights: {expect_y}')

        mu[i] = cp.E(gPC_m, distr_unif_w)
        V[i]= cp.Var(gPC_m, distr_unif_w)

        print("mu = %.8f,V = %.8f" % (mu[i], V[i]))


# manual calculation of the expansion coefficients
# Note: if you do this in the same loop, the mean results change only because we loop over K[i] without any further action
Example #24
def plot_figures():
    """Plot figures for tutorial."""
    numpy.random.seed(1000)

    def foo(coord, param):
        return param[0] * numpy.e**(-param[1] * coord)

    coord = numpy.linspace(0, 10, 200)
    distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

    samples = distribution.sample(50)
    evals = numpy.array([foo(coord, sample) for sample in samples.T])

    plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.savefig("demonstration.png")
    plt.clf()

    samples = distribution.sample(1000, "H")
    evals = [foo(coord, sample) for sample in samples.T]
    expected = numpy.mean(evals, 0)
    deviation = numpy.std(evals, 0)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using Monte Carlo simulation")
    plt.savefig("results_montecarlo.png")
    plt.clf()

    polynomial_expansion = cp.orth_ttr(8, distribution)
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using point collocation method")
    plt.savefig("results_collocation.png")
    plt.clf()

    absissas, weights = cp.generate_quadrature(8, distribution, "C")
    evals = [foo(coord, val) for val in absissas.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, absissas, weights,
                                   evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using psuedo-spectral projection method")
    plt.savefig("results_spectral.png")
    plt.clf()
Example #25
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2 * len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)

# Rosenblatt transformation using pseudo-spectral projection


def u(x, a, I):
    return I * np.exp(-a * x)


C = [[1, 0.5], [0.5, 1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R, weights_R = cp.generate_quadrature(3, dist_R)
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))
weights_Q = weights_R * dist_Q.pdf(nodes_Q) / dist_R.pdf(nodes_R)

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_quadrature(P, nodes_R, weights_Q, samples_u)
Example #26
# Defining the random input distributions:
dists = [cp.Uniform(0, 0.5) for i in range(3)]
dists.append(cp.Uniform(-200.0, 50.0))
dist = cp.J(*dists)

num_tests = 100
order = 3

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1, dist, rule="G", sparse=False)
# solves = [u(s) for s in nodes.T]
solves = parmap(u, nodes.T)  # [u(s) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(s) for s in test_inputs.T])
surrogate_test_outputs = numpy.array([U_hat(*s) for s in test_inputs.T])

print "mean l2 error", numpy.mean(numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))

# scatter for all QOI
num_qoi = test_outputs.shape[1]
for qoi_i in range(num_qoi):
    plt.subplot(int(numpy.ceil(num_qoi / 3.0)), 3, qoi_i + 1)
    plt.scatter(test_outputs[:, qoi_i], surrogate_test_outputs[:, qoi_i])
    plt.plot([test_outputs[0, qoi_i], test_outputs[-1, qoi_i]], [test_outputs[0, qoi_i], test_outputs[-1, qoi_i]], "--")
    plt.title("qoi %d" % qoi_i)
Example #27
	def get_gpc_approx(self, P, nodes, weights, sample_func):
		gpc_approx = cp.fit_quadrature(P, nodes, weights, sample_func)

		return gpc_approx
Example #28
P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2*len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)




# Rosenblatt transformation using pseudo-spectral projection

def u(x,a, I):
    return I*np.exp(-a*x)

C = [[1,0.5],[0.5,1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R, weights_R = cp.generate_quadrature(3, dist_R)
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))
weights_Q = weights_R*dist_Q.pdf(nodes_Q)/dist_R.pdf(nodes_R)

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_quadrature(P, nodes_R, weights_Q, samples_u)
Example #29
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get the Polynomial
        P = self.sampler.P

        # Get the PCE variant to use (regression or projection)
        regression = self.sampler.regression

        # Compute nodes (and weights)
        if regression:
            nodes = cp.generate_samples(order=self.sampler.n_samples,
                                        domain=self.sampler.distribution,
                                        rule=self.sampler.rule)
        else:
            nodes, weights = cp.generate_quadrature(
                order=self.sampler.quad_order,
                dist=self.sampler.distribution,
                rule=self.sampler.rule,
                sparse=self.sampler.quad_sparse,
                growth=self.sampler.quad_growth)

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame.run_id.unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame['run_id'] == run_id][k]
                samples[k].append(data.values)

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                if samples[k][0].dtype == object:
                    for i in range(self.sampler.count):
                        samples[k][i] = samples[k][i].astype("float64")
                fit = cp.fit_regression(P, nodes, samples[k], "T")
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles (Pxx)
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            ipar = 0
            i = 0
            for param_name in self.sampler.vary.get_keys():
                j = self.sampler.params_size[ipar]
                sobols_first_dict[param_name] = sobols_first_narr[i:i + j]
                sobols_second_dict[param_name] = sobols_second_narr[i:i + j]
                sobols_total_dict[param_name] = sobols_total_narr[i:i + j]
                i += j
                ipar += 1
            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return results
Example #30
uq = profit.UQ(yaml='uq.yaml')
distribution = cp.J(*uq.params.values())
sparse = uq.backend.sparse
if sparse:
    order = 2 * 3
else:
    order = 3 + 1

# actually start the postprocessing now:

nodes, weights = cp.generate_quadrature(order,
                                        distribution,
                                        rule='G',
                                        sparse=sparse)
expansion, norms = cp.orth_ttr(3, distribution, retall=True)
approx_denit = cp.fit_quadrature(expansion, nodes, weights,
                                 np.mean(data[:, 1, :], axis=1))
approx_oxy = cp.fit_quadrature(expansion, nodes, weights,
                               np.mean(data[:, 0, :], axis=1))

annual_oxy = cp.fit_quadrature(expansion, nodes, weights, data[:, 0, :])
annual_denit = cp.fit_quadrature(expansion, nodes, weights, data[:, 1, :])

s_denit = cp.descriptives.sensitivity.Sens_m(annual_denit, distribution)
s_oxy = cp.descriptives.sensitivity.Sens_m(annual_oxy, distribution)

df_oxy = cp.Std(annual_oxy, distribution)
df_denit = cp.Std(annual_denit, distribution)
f0_oxy = cp.E(annual_oxy, distribution)
f0_denit = cp.E(annual_denit, distribution)
Example #31
def run_test(testi, typid, exceed_evals=None, evals_end=None, max_time=None):
    problem_function_wrapped = FunctionCustom(lambda x: problem_function(x), output_dim=problem_function.output_length())
    op.f = problem_function_wrapped

    measure_start = time.time()
    multiple_evals = None
    typ = types[typid]
    if typ not in ("Gauss", "Fejer", "sparseGauss"):
        do_inverse_transform = typ in ("adaptiveTransBSpline", "adaptiveTransTrapez", "adaptiveTransHO")
        if do_inverse_transform:
            a_trans, b_trans = np.zeros(dim), np.ones(dim)

        if typ == "adaptiveHO":
            grid = GlobalHighOrderGridWeighted(a, b, op, boundary=uniform_distr)
        elif typ in ("adaptiveTrapez", "Trapez"):
            grid = GlobalTrapezoidalGridWeighted(a, b, op, boundary=uniform_distr)
        elif typ == "adaptiveLagrange":
            grid = GlobalLagrangeGridWeighted(a, b, op, boundary=uniform_distr)
        elif typ == "adaptiveTransBSpline":
            grid = GlobalBSplineGrid(a_trans, b_trans, boundary=uniform_distr)
        elif typ == "adaptiveTransTrapez":
            grid = GlobalTrapezoidalGrid(a_trans, b_trans, boundary=uniform_distr)
        elif typ == "adaptiveTransHO":
            grid = GlobalHighOrderGrid(a_trans, b_trans, boundary=uniform_distr, split_up=False)
        op.set_grid(grid)

        if do_inverse_transform:
            # Use Integration operation
            f_refinement = op.get_inverse_transform_Function(op.get_PCE_Function(poly_deg_max))
            # ~ f_refinement.plot(np.array([0.001]*2), np.array([0.999]*2), filename="trans.pdf")
            op_integration = Integration(f_refinement, grid, dim)
            combiinstance = SpatiallyAdaptiveSingleDimensions2(a_trans, b_trans, operation=op_integration,
                norm=2)
        else:
            combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op, norm=2)
            f_refinement = op.get_PCE_Function(poly_deg_max)

        lmax = 3
        if typ == "Trapez":
            lmax = testi + 2
        if evals_end is not None:
            multiple_evals = dict()
            combiinstance.performSpatiallyAdaptiv(1, lmax, f_refinement,
                error_operator, tol=0, max_evaluations=evals_end,
                print_output=True, solutions_storage=multiple_evals,
                max_time=max_time)
        elif exceed_evals is None or typ == "Trapez":
            combiinstance.performSpatiallyAdaptiv(1, lmax, f_refinement,
                error_operator, tol=0,
                max_evaluations=1,
                print_output=verbose)
        else:
            combiinstance.performSpatiallyAdaptiv(1, lmax, f_refinement,
                error_operator, tol=np.inf,
                max_evaluations=np.inf, min_evaluations=exceed_evals+1,
                print_output=verbose)

        # ~ combiinstance.plot()
        if multiple_evals is None:
            op.calculate_PCE(None, combiinstance)
    else:
        polys, polys_norms = cp.orth_ttr(poly_deg_max, op.distributions_joint, retall=True)
        if typ == "Gauss":
            if testi >= 29:
                # Reference solution or negative points
                return np.inf
            nodes, weights = cp.generate_quadrature(testi,
                op.distributions_joint, rule="G")
        elif typ == "Fejer":
            nodes, weights = cp.generate_quadrature(testi,
                op.distributions_joint, rule="F", normalize=True)
        elif typ == "sparseGauss":
            level = testi+1
            if level > 5:
                # normal distribution has infinite bounds
                return np.inf
            expectations = [distr[1] for distr in distris]
            standard_deviations = [distr[2] for distr in distris]
            hgrid = GaussHermiteGrid(expectations, standard_deviations)
            op.set_grid(hgrid)
            combiinstance = StandardCombi(a, b, operation=op)
            combiinstance.perform_combi(1, level, problem_function_wrapped)
            nodes, weights = combiinstance.get_points_and_weights()
            nodes = nodes.T

        f_evals = [problem_function_wrapped(c) for c in zip(*nodes)]
        op.gPCE = cp.fit_quadrature(polys, nodes, weights, np.asarray(f_evals), norms=polys_norms)

    print("simulation time: " + str(time.time() - measure_start) + " s")

    def reshape_result_values(vals): return vals[0]
    tmpdir = os.getenv("XDG_RUNTIME_DIR")
    results_path = tmpdir + "/uqtestSD.npy"
    solutions_data = []
    if os.path.isfile(results_path):
        solutions_data = list(np.load(results_path, allow_pickle=True))

    def calc_errors(op, num_evals):
        E, Var = op.get_expectation_PCE(), op.get_variance_PCE()
        E = reshape_result_values(E)
        Var = reshape_result_values(Var)

        # ~ err_descs = ("E prey", "P10 prey", "P90 prey", "Var prey")
        err_descs = ("E prey", "Var prey")
        err_data = (
            (E, E_ref),
            # ~ (P10, P10_ref),
            # ~ (P90, P90_ref),
            (Var, Var_ref)
        )
        errors = []
        for i,desc in enumerate(err_descs):
            vals = err_data[i]
            abs_err = error_absolute(*vals)
            rel_err = error_relative(*vals)
            errors.append(abs_err)
            errors.append(rel_err)
            print(f"{desc}: {vals[0]}, absolute error: {abs_err}, relative error: {rel_err}")

        result_data = (num_evals, timestep_problem, typid, errors)
        assert len(result_data) == 4
        assert len(errors) == 4
        return result_data

    if multiple_evals is None:
        num_evals = problem_function_wrapped.get_f_dict_size()
        result_data = calc_errors(op, num_evals)

        if all([any([d[i] != result_data[i] for i in range(3)]) for d in solutions_data]):
            solutions_data.append(result_data)
            np.save(results_path, solutions_data)

        return num_evals

    solutions = op.sort_multiple_solutions(multiple_evals)
    for num_evals, integrals in solutions:
        op.calculate_PCE_from_multiple(combiinstance, integrals)
        result_data = calc_errors(op, num_evals)

        if all([any([d[i] != result_data[i] for i in range(3)]) for d in solutions_data]):
            solutions_data.append(result_data)
    np.save(results_path, solutions_data)

    return problem_function_wrapped.get_f_dict_size()
Example #32
import chaospy
import chaospy as cp
import pandas as pd
import numpy as np

QUAD_ORDER = 18
quad = False


def f(x, y):
    return (1 - x)**2 * 10 * (y - x**2)**2


distribution = chaospy.J(chaospy.Normal(0, 1), chaospy.Normal(0, 1))

if quad:
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    X, W = chaospy.generate_quadrature(QUAD_ORDER, distribution, rule="G")
    evals = [f(x[0], x[1]) for x in X.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, X, W, evals)
else:
    dat = pd.read_csv('./dakota_tabular.dat', sep=r'\s+')
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    samples = np.array([dat.x1, dat.x2])
    evals = dat.response_fn_1
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)

total = chaospy.descriptives.sensitivity.total.Sens_t(foo_approx, distribution)
main = chaospy.descriptives.sensitivity.main.Sens_m(foo_approx, distribution)
Example #33
from time import time
from chaospy import Normal, generate_expansion

if __name__ == '__main__':

    prior_mean = 0.
    prior_sigma = 5.
    dim = 2
    prior = cpy.Iid(Normal(prior_mean, prior_sigma), dim)

    poly_order = 2
    abscissas, weights = cpy.generate_quadrature(poly_order,
                                                 prior,
                                                 rule='gaussian')
    expansion = generate_expansion(poly_order, prior, retall=False)

    def forward_model(params):
        return np.prod(np.exp(-params**2))

    evals = np.array([forward_model(sample) for sample in abscissas.T])
    surrogate = cpy.fit_quadrature(expansion, abscissas, weights, evals)

    coefficients = np.array(surrogate.coefficients)
    indeterminants = surrogate.indeterminants
    exponents = surrogate.exponents

    t = time()
    for _ in range(10000):
        surrogate(.5, 1.5)
    print(time() - t)
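
Chaospy polynomials also accept array arguments, so the 10000 evaluations above can be done in one vectorized call; a sketch (same point repeated, purely for comparison with the loop):

    xs = np.full(10000, 0.5)
    ys = np.full(10000, 1.5)
    t = time()
    values = surrogate(xs, ys)   # evaluates the surrogate at all 10000 points at once
    print(time() - t)
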
Example #34
# Create 3rd order quadrature scheme
nodes, weights = cp.generate_quadrature(order=3,
                                        domain=distribution,
                                        rule="Gaussian")

u0 = 0.3
# Evaluate model at the nodes
x = np.linspace(0, 1, 101)
samples = [model(x, u0, node[0], node[1], node[2]) for node in nodes.T]

# Generate 3rd order orthogonal polynomial expansion
polynomials = cp.orth_ttr(order=3, dist=distribution)

# Create model approximation (surrogate solver)
model_approx = cp.fit_quadrature(polynomials, nodes, weights, samples)

# Model analysis
mean = cp.E(model_approx, distribution)
deviation = cp.Std(model_approx, distribution)

# Plot results
from matplotlib import pyplot as plt
plt.rc("figure", figsize=[8, 6])
plt.fill_between(x, mean - deviation, mean + deviation, color="k", alpha=0.5)
plt.plot(x, mean, "k", lw=2)
plt.xlabel("depth $x$")
plt.ylabel("porosity $u$")
plt.legend(["mean $\pm$ deviation", "mean"])
plt.savefig("ode.pdf")
Example #35
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame[('run_id', 0)].unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
                samples[k].append(data.values.flatten())

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit = cp.fit_regression(P, nodes, samples[k])
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles: 10% and 90%
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            for i, param_name in enumerate(self.sampler.vary.vary_dict):
                sobols_first_dict[param_name] = sobols_first_narr[i]
                sobols_second_dict[param_name] = sobols_second_narr[i]
                sobols_total_dict[param_name] = sobols_total_narr[i]

            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))