Example #1
def KG(z, evls, pnts, gp, kernel, NSAMPS=30, DEG=3, sampling=False):

    # Find initial minimum value from GP model
    min_val = 1e100
    X_sample = pnts
    Y_sample = evls
    #for x0 in [np.random.uniform(XL, XU, size=DIM) for oo in range(20)]:
    x0 = np.random.uniform(XL, XU, size=DIM)
    res = mini(gp, x0=x0,
               bounds=[(XL, XU)
                       for ss in range(DIM)])  #, method='Nelder-Mead')
    #res = mini(expected_improvement, x0=x0[0], bounds=[(XL, XU) for ss in range(DIM)], args=(X_sample, Y_sample, gp))#, callback=callb)
    #   if res.fun < min_val:
    min_val = res.fun
    min_x = res.x

    # estimate min(f^{n+1}) with MC simulation
    MEAN = 0
    points = np.atleast_2d(np.append(X_sample, z)).T
    m, s = gp(z, return_std=True)
    distribution = cp.J(cp.Normal(0, s))
    samples = distribution.sample(NSAMPS, rule='Halton')
    PCEevals = []
    for pp in range(NSAMPS):

        # construct future GP, using z as the next point
        evals = np.append(evls, m + samples[pp])
        #evals = np.append(evls, m + np.random.normal(0, s))
        gpnxt = GaussianProcessRegressor(kernel=kernel,
                                         n_restarts_optimizer=35,
                                         random_state=98765,
                                         normalize_y=True)
        gpnxt.fit(points, evals)

        # convenience function
        def gpf_next(x, return_std=False):
            alph, astd = gpnxt.predict(np.atleast_2d(x), return_std=True)
            alph = alph[0]
            if return_std:
                return (alph, astd)
            else:
                return alph

        res = mini(gpf_next, x0=x0, bounds=[(XL, XU) for ss in range(DIM)])
        min_next_val = res.fun
        min_next_x = res.x

        #print('+++++++++ ', res.fun)
        #MEAN += min_next_val
        PCEevals.append(min_next_val)
    if not sampling:
        polynomial_expansion = cp.orth_ttr(DEG, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
    else:
        MEAN = np.mean(PCEevals)
    #print(PCEevals, '...', MEAN)
    #hey
    #MEAN /= NSAMPS
    return min_val - MEAN
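
A minimal driver sketch for the knowledge-gradient style acquisition above, assuming it sits in the same module as KG: the globals XL, XU, DIM, the mini alias for scipy.optimize.minimize, and the toy objective f are illustrative stand-ins, not part of the original snippet.

# Hypothetical driver for KG(); XL, XU, DIM, mini and the toy objective f are
# assumptions made only for this sketch.
import numpy as np
from scipy.optimize import minimize as mini
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

XL, XU, DIM = 0.0, 1.0, 1                  # 1-D search box

def f(x):                                  # toy objective standing in for the real model
    return np.sin(3 * x) + 0.1 * x

pnts = np.random.uniform(XL, XU, 5)
evls = np.array([f(x) for x in pnts])
kernel = RBF(length_scale=0.2)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(np.atleast_2d(pnts).T, evls)

def gp(x, return_std=False):               # convenience wrapper, same style as gpf_next above
    mu, std = gpr.predict(np.atleast_2d(x), return_std=True)
    return (mu[0], std[0]) if return_std else mu[0]

# Score candidates and pick the one with the largest expected drop of the posterior minimum.
candidates = np.linspace(XL, XU, 20)
scores = [KG(z, evls, pnts, gp, kernel, NSAMPS=10, DEG=2) for z in candidates]
next_x = candidates[int(np.argmax(scores))]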
Example #2
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    t1 = cp.Uniform(0, 150)
    t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  #No good for dependent
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if 1:
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
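
A short, hypothetical post-processing sketch for the expansion returned above; it only works if HEPBankReducedSmooth is importable, and the joint distribution is rebuilt here to mirror the one used inside ExpandBank().

# Hypothetical usage; the Normal(70, 1) x Normal(115, 1) joint mirrors the pdf
# built inside ExpandBank() above.
import chaospy as cp

hepPCE = ExpandBank()
pdf = cp.J(cp.Normal(70, 1), cp.Normal(115, 1))

mean_profile = cp.E(hepPCE, pdf)      # mean bank-angle history over t
std_profile = cp.Std(hepPCE, pdf)     # pointwise standard deviation over t

# The surrogate can also be evaluated directly at a single (t1, t2) sample:
profile = hepPCE(70.5, 114.2)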
Example #3
def polynomial_chaos_sens(Ns_pc,
                          jpdf,
                          polynomial_order,
                          poly=None,
                          return_reg=False):
    N_terms = int(len(jpdf) / 2)
    # 1. generate orthogonal polynomials
    poly = poly or cp.orth_ttr(polynomial_order, jpdf)
    # 2. generate samples with random sampling
    samples_pc = jpdf.sample(size=Ns_pc, rule='R')
    # 3. evaluate the model, to do so transpose samples and hash input data
    transposed_samples = samples_pc.transpose()
    samples_z = transposed_samples[:, :N_terms]
    samples_w = transposed_samples[:, N_terms:]
    model_evaluations = linear_model(samples_w, samples_z)
    # 4. calculate generalized polynomial chaos expression
    gpce_regression = cp.fit_regression(poly, samples_pc, model_evaluations)
    # 5. get sensitivity indices
    Spc = cp.Sens_m(gpce_regression, jpdf)
    Stpc = cp.Sens_t(gpce_regression, jpdf)

    if return_reg:
        return Spc, Stpc, gpce_regression
    else:
        return Spc, Stpc
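
A hypothetical call of polynomial_chaos_sens: the additive toy linear_model defined here is an assumption made only so the sketch is self-contained (the original module supplies its own), and the joint distribution must list the z-variables before the w-variables, matching the slicing above.

import numpy as np
import chaospy as cp

def linear_model(w, z):                       # stand-in for the module's own model
    return np.sum(w * z, axis=1)

N_terms = 4
z_dists = [cp.Normal(0, 1) for _ in range(N_terms)]
w_dists = [cp.Uniform(0.5, 1.5) for _ in range(N_terms)]
jpdf = cp.J(*(z_dists + w_dists))             # z-block first, then w-block

Spc, Stpc = polynomial_chaos_sens(Ns_pc=200, jpdf=jpdf, polynomial_order=3)
print(Spc)   # first-order Sobol indices
print(Stpc)  # total Sobol indices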
Example #4
def test_integration():
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    gq = cp.generate_quadrature
    nodes, weights = gq(order, dist, rule="C")
    vals = np.zeros((len(weights), size))
    cp.fit_quadrature(orth, nodes, weights, vals, norms=norms)
Example #5
def fit():
    nodes, weights = generate_quadrature(4,
                                         distribution,
                                         rule='G',
                                         sparse=False)
    print(np.max(nodes - indata.T))
    expansion = orth_ttr(3, distribution)
    return fit_quadrature(expansion, nodes, weights, outdata)
Example #6
def test_regression():
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    data = dist.sample(samples)
    vals = np.zeros((samples, size))
    cp.fit_regression(orth, data, vals, "LS")
    cp.fit_regression(orth, data, vals, "T", order=0)
    cp.fit_regression(orth, data, vals, "TC", order=0)
Example #7
def EHI(x,
        gp1,
        gp2,
        xi=0.,
        x2=None,
        MD=None,
        NSAMPS=200,
        PCE=False,
        ORDER=2,
        PAR_RES=100):

    mu1, std1 = gp1(x, return_std=True)
    mu2, std2 = gp2(x, return_std=True)

    a, b, c = parEI(gp1, gp2, x2, '', EI=False, MD=MD, PAR_RES=PAR_RES)
    par = b.T[c, :]
    par += xi
    MEAN = 0  # running sum for observed hypervolume improvement
    if not PCE:  # Monte Carlo Sampling
        for ii in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improved from this point
            if idx[-1]:
                MEAN += H(newPar) - H(par)

        return (MEAN / NSAMPS)
    else:
        # Polynomial Chaos
        # (assumes 2 objective functions)
        distribution = cp.J(cp.Normal(0, std1), cp.Normal(0, std2))

        # quasi-random (Halton) samples
        samples = distribution.sample(NSAMPS, rule='Halton')
        PCEevals = []
        for pp in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improves
            if idx[-1]:
                PCEevals.append(H(newPar) - H(par))
            else:
                PCEevals.append(0)
        polynomial_expansion = cp.orth_ttr(ORDER, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
        return (MEAN)
Example #8
File: test_orth.py · Project: flo2k/chaospy
def test_orth_ttr():
    dist = cp.Normal(0, 1)
    orth = cp.orth_ttr(5, dist)
    outer = cp.outer(orth, orth)
    Cov1 = cp.E(outer, dist)
    Diatoric = Cov1 - np.diag(np.diag(Cov1))
    assert np.allclose(Diatoric, 0)

    Cov2 = cp.Cov(orth[1:], dist)
    assert np.allclose(Cov1[1:,1:], Cov2)
Example #9
def test_orth_ttr():
    dist = cp.Normal(0, 1)
    orth = cp.orth_ttr(5, dist)
    outer = cp.outer(orth, orth)
    Cov1 = cp.E(outer, dist)
    Diatoric = Cov1 - np.diag(np.diag(Cov1))
    assert np.allclose(Diatoric, 0)

    Cov2 = cp.Cov(orth[1:], dist)
    assert np.allclose(Cov1[1:,1:], Cov2)
Example #10
def SRPCostRS(p, sim, pdf):

    polynomials = cp.orth_ttr(order=2, dist=pdf)
    samples, weights = cp.generate_quadrature(order=2,
                                              domain=pdf,
                                              rule="Gaussian")
    stateTensor = [SRPCost(p, sim, s) for s in samples.T]
    # stateTensor = pool.map(OptCost,samples.T)
    PCE = cp.fit_quadrature(polynomials, samples, weights, stateTensor)

    # print "PCE Expectation: {} ".format(cp.E(poly=PCE,dist=pdf))
    return cp.E(poly=PCE, dist=pdf)
Example #11
        def __init__(self,
                     dimension=None,
                     input=None,
                     output=None,
                     order=None):

            self.dimension = dimension
            self.input = np.transpose(input)
            self.output = output
            self.order = order

            self.distribution = cp.Iid(cp.Uniform(0, 1), self.dimension)
            orthogonal_expansion = cp.orth_ttr(self.order, self.distribution)
            self.poly = cp.fit_regression(orthogonal_expansion, self.input,
                                          self.output)
Example #12
 def __init__(self, varsetInst, regen=True):
     self.varset = varsetInst.varset
     self.Nvar = varsetInst.Nvar
     self.jointDist = varsetInst.jointDist
     order = 3  #order of ortho polynomial
     if regen:
         self.polynomials = cp.orth_ttr(order, self.jointDist)
         self.nodes, self.weights = cp.generate_quadrature(order + 1,
                                                           self.jointDist,
                                                           rule="G",
                                                           sparse=True)
         pkl.dump([self.polynomials, self.nodes, self.weights],
                  open('pcecoeff.dump', 'wb'))
     else:
         [self.polynomials, self.nodes,
          self.weights] = pkl.load(open('pcecoeff.dump', 'rb'))
     self.nsamples = self.nodes.shape[1]
     self.co2Model = None
Example #13
File: post.py · Project: redmod-team/profit
def evaluate_postprocessing(distribution, data, expansion):
    import matplotlib.pyplot as plt
    from profit import read_input
    from chaospy import generate_quadrature, orth_ttr, fit_quadrature, E, Std, descriptives

    nodes, weights = generate_quadrature(uq.backend.order + 1,
                                         distribution,
                                         rule='G')
    expansion = orth_ttr(uq.backend.order, distribution)
    approx = fit_quadrature(expansion, nodes, weights,
                            np.mean(data[:, 0, :], axis=1))
    urange = list(uq.params.values())[0].range()
    vrange = list(uq.params.values())[1].range()
    u = np.linspace(urange[0], urange[1], 100)
    v = np.linspace(vrange[0], vrange[1], 100)
    U, V = np.meshgrid(u, v)
    c = approx(U, V)

    # for 3 parameters:
    #wrange = list(uq.params.values())[2].range()
    #w = np.linspace(wrange[0], wrange[1], 100)
    #W = 0.03*np.ones(U.shape)
    #c = approx(U,V,W)

    plt.figure()
    plt.contour(U, V, c, 20)
    plt.colorbar()
    plt.scatter(config.eval_points[0, :],
                config.eval_points[1, :],
                c=np.mean(data[:, 0, :], axis=1))

    plt.show()

    F0 = E(approx, distribution)
    dF = Std(approx, distribution)
    sobol1 = descriptives.sensitivity.Sens_m(approx, distribution)
    sobolt = descriptives.sensitivity.Sens_t(approx, distribution)
    sobol2 = descriptives.sensitivity.Sens_m2(approx, distribution)

    print('F = {} +- {}%'.format(F0, 100 * abs(dF / F0)))
    print('1st order sensitivity indices:\n {}'.format(sobol1))
    print('Total order sensitivity indices:\n {}'.format(sobolt))
    print('2nd order sensitivity indices:\n {}'.format(sobol2))
Example #14
def calculate_sobol_indices(quad_deg_1D, poly_deg_1D, joint_distr, sparse_bool,
                            title_names):
    nodes, weights = cp.generate_quadrature(quad_deg_1D,
                                            joint_distr,
                                            rule='G',
                                            sparse=sparse_bool)
    c, k, f, y0, y1 = nodes

    poly = cp.orth_ttr(poly_deg_1D, joint_distr, normed=True)

    y_out = [
        discretize_oscillator_odeint(model, atol, rtol, (y0_, y1_),
                                     (c_, k_, f_, w), t)[-1]
        for c_, k_, f_, y0_, y1_ in zip(c, k, f, y0, y1)
    ]

    # find generalized Polynomial chaos and expansion coefficients
    gPC_m, expansion_coeff = cp.fit_quadrature(poly,
                                               nodes,
                                               weights,
                                               y_out,
                                               retall=True)
    #print(f'The best polynomial of degree {poly_deg_1D} that approximates f(x): {cp.around(gPC_m, 1)}')
    # gPC_m is the polynomial that approximates the most
    print(
        f'Expansion coeff [0] (mean) for poly {poly_deg_1D} = {expansion_coeff[0]}'
    )  # , expect_weights: {expect_y}')
    #mu = cp.E(gPC_m, joint_distr)
    #print(f'Mean value from gPCE: {mu}')

    # Sobol indices
    first_order_Sobol_ind = cp.Sens_m(gPC_m, joint_distr)
    total_Sobol_ind = cp.Sens_t(gPC_m, joint_distr)

    print("The number of quadrature nodes for the grid is", len(nodes.T))
    print(f'The first order Sobol indices are \n {first_order_Sobol_ind}')
    print(f"The total Sobol' indices are \n {total_Sobol_ind}")

    plot_sobol_indices(first_order_Sobol_ind, title_names[0], False)
    plot_sobol_indices(total_Sobol_ind, title_names[1], False)

    return first_order_Sobol_ind, total_Sobol_ind
Example #15
    def solve_nonlinear(self, params, unknowns, resids):

        power = params["dirPowers"]
        method_dict = params["method_dict"]
        dist = method_dict["distribution"]
        n = len(power)
        points, weights = cp.generate_quadrature(order=n - 1, domain=dist, rule="G")
        poly = cp.orth_ttr(
            n - 1, dist
        )  # For 1-D use order n-1; for 2-D or more it would be n-2. See the Dakota reference manual on quadrature order.
        # Double check if giving me orthogonal polynomials
        # p2 = cp.outer(poly, poly)
        # norms = np.diagonal(cp.E(p2, dist))
        # print 'diag', norms

        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True, norms=norms)
        expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)
        # expansion, coeff = cp.fit_regression(poly, points, power, retall=True)

        mean = cp.E(expansion, dist, rule="G")
        # print 'mean cp.E =', mean
        # # mean = sum(power*weights)
        # print 'mean sum =', sum(power*weights)
        # print 'mean coeff =', coeff[0]*8760/1e6
        std = cp.Std(expansion, dist, rule="G")

        # print mean
        # print std
        # print np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns["mean"] = mean * hours
        unknowns["std"] = std * hours

        # Modify the statistics to account for the truncation of the weibull (speed) case.
        modify_statistics(params, unknowns)  # It doesn't do anything for the direction case.

        print "In ChaospyStatistics"
Example #16
File: pce.py · Project: di82yem/EasyVVUQ
    def __init__(self,
                 vary=None,
                 count=0,
                 polynomial_order=4,
                 regression=False,
                 rule="G",
                 sparse=False,
                 growth=False):
        """
        Create the sampler for the Polynomial Chaos Expansion using
        pseudo-spectral projection or regression (Point Collocation).

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.

        count : int, optional
            Specified counter for Fast forward, default is 0.

        polynomial_order : int, optional
            The polynomial order, default is 4.

        regression : bool, optional
            If True, the regression variant (point collocation) will be used,
            otherwise the projection variant (pseudo-spectral) will be used.
            Default value is False.

        rule : char, optional
            The quadrature method in the case of projection (default is Gaussian "G"),
            or the sequence sampler in the case of regression (default is Hammersley "M").

        sparse : bool, optional
            If True, use Smolyak sparse grid instead of normal tensor product
            grid. Default value is False.

        growth : bool or None, optional
            If True, quadrature points become nested.
        """

        if vary is None:
            msg = ("'vary' cannot be None. RandomSampler must be passed a "
                   "dict of the names of the parameters you want to vary, "
                   "and their corresponding distributions.")
            logging.error(msg)
            raise Exception(msg)
        if not isinstance(vary, dict):
            msg = ("'vary' must be a dictionary of the names of the "
                   "parameters you want to vary, and their corresponding "
                   "distributions.")
            logging.error(msg)
            raise Exception(msg)
        if len(vary) == 0:
            msg = "'vary' cannot be empty."
            logging.error(msg)
            raise Exception(msg)

        self.vary = Vary(vary)
        self.polynomial_order = polynomial_order

        # List of the probability distributions of uncertain parameters
        params_distribution = list(vary.values())

        # Multivariate distribution
        self.distribution = cp.J(*params_distribution)

        # The orthogonal polynomials corresponding to the joint distribution
        self.P = cp.orth_ttr(polynomial_order, self.distribution)

        # The quadrature information
        self.quad_sparse = sparse
        self.rule = rule

        # Clenshaw-Curtis should be nested if sparse (#139 chaospy issue)
        self.quad_growth = growth
        cc = ['c', 'C', 'clenshaw_curtis', 'Clenshaw_Curtis']
        if sparse and rule in cc:
            self.quad_growth = True

        # Determine which PCE variant to use
        self.regression = regression

        # Regression variant (point collocation method)
        if regression:
            # Change the default rule
            if rule == "G":
                self.rule = "M"

            # Generates samples
            self._n_samples = 2 * len(self.P)
            self._nodes = cp.generate_samples(order=self._n_samples,
                                              domain=self.distribution,
                                              rule=self.rule)
            self._weights = None

        # Projection variant (pseudo-spectral method)
        else:
            # Nodes and weights for the integration
            self._nodes, self._weights = cp.generate_quadrature(order=polynomial_order,
                                                                dist=self.distribution,
                                                                rule=self.rule,
                                                                sparse=sparse,
                                                                growth=self.quad_growth)
            # Number of samples
            self._n_samples = len(self._nodes[0])

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._n_samples:
            msg = (f"Attempt to start sampler fastforwarded to count {self.count}, "
                   f"but sampler only has {self.n_samples} samples, therefore"
                   f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
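
A hypothetical construction of the sampler whose __init__ is shown above, assuming it is the PCESampler class shipped with EasyVVUQ; the import path and the parameter names inside vary are illustrative, not taken from the snippet itself.

import chaospy as cp
import easyvvuq as uq

vary = {
    "kappa": cp.Uniform(0.025, 0.075),
    "t_env": cp.Uniform(15.0, 25.0),
}

# Pseudo-spectral projection with Gaussian quadrature (the defaults above).
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)

# Point-collocation (regression) variant; the rule switches from Gaussian
# quadrature to Hammersley sampling, as described in the docstring.
sampler_reg = uq.sampling.PCESampler(vary=vary, polynomial_order=3,
                                     regression=True)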
Example #17
def gpc(dists, distsMeta, wallModel, order, hdf5group, sampleScheme='M'):
    print "\n GeneralizedPolynomialChaos - order {}\n".format(order)

    dim = len(dists)

    expansionOrder = order
    numberOfSamples = 4 * cp.terms(expansionOrder, dim)

    # Sample in independent space
    samples = dists.sample(numberOfSamples, sampleScheme).transpose()
    model = wallModel(distsMeta)

    # Evaluate the model (which is not linear obviously)
    pool = multiprocessing.Pool()
    data = pool.map(model, samples)
    pool.close()
    pool.join()
    C_data = [retval[0] for retval in data]
    a_data = [retval[1] for retval in data]

    C_data = np.array(C_data)
    a_data = np.array(a_data)
    # Orthogonal C_polynomial from marginals
    orthoPoly = cp.orth_ttr(expansionOrder, dists)

    for data, outputName in zip([C_data, a_data], ['Compliance', 'Area']):

        # Fit the model together in independent space
        C_polynomial = cp.fit_regression(orthoPoly, samples.transpose(), data)

        # save data to dictionary
        plotMeanConfidenceAlpha = 5

        C_mean = cp.E(C_polynomial, dists)
        C_std = cp.Std(C_polynomial, dists)

        Si = cp.Sens_m(C_polynomial, dists)
        STi = cp.Sens_t(C_polynomial, dists)

        C_conf = cp.Perc(
            C_polynomial,
            [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
            dists)

        a = np.linspace(0, 100, 1000)
        da = a[1] - a[0]
        C_cdf = cp.Perc(C_polynomial, a, dists)

        C_pdf = da / (C_cdf[1::] - C_cdf[0:-1])
        # Resample to generate full histogram
        samples2 = dists.sample(numberOfSamples * 100, sampleScheme)
        C_data2 = C_polynomial(*samples2).transpose()

        # save in hdf5 file
        solutionDataGroup = hdf5group.create_group(outputName)

        solutionData = {
            'mean': C_mean,
            'std': C_std,
            'confInt': C_conf,
            'Si': Si,
            'STi': STi,
            'cDataGPC': C_data,
            'samplesGPC': samples,
            'cData': C_data2,
            'samples': samples2.transpose(),
            'C_pdf': C_pdf
        }

        for variableName, variableValue in solutionData.items():
            solutionDataGroup.create_dataset(variableName, data=variableValue)
Example #18
    totalvarianceqMC = np.add(totalvarianceqMC, varianceqMC)


totalerrorqMC = np.divide(totalerrorqMC, reruns)
totalvarianceqMC = np.divide(totalvarianceqMC, reruns)



errorCP = []
varCP = []

K = []

N = 5
for n in range(0, N + 1):
    P = cp.orth_ttr(n, dist)
    nodes, weights = cp.generate_quadrature(n+1, dist, rule="G")
    K.append(len(nodes[0]))
    i1,i2 = np.mgrid[:len(weights), :Nt]
    solves = u(T[i2],nodes[0][i1],nodes[1][i1])

    U_hat = cp.fit_quadrature(P, nodes, weights, solves)
    errorCP.append(dt*np.sum(np.abs(E_analytical(T) - cp.E(U_hat,dist))))
    varCP.append(dt*np.sum(np.abs(V_analytical(T) - cp.Var(U_hat,dist))))


# pl.rc("figure", figsize=[6,4])

ax, tableau20 = prettyPlot()
pl.plot(-1, 1, "k-", linewidth=2)
pl.plot(-1, 1, "k--", linewidth=2)
Example #19
def test_orthogonals():
    dist = cp.Iid(cp.Normal(), dim)
    cp.orth_gs(order, dist)
    cp.orth_ttr(order, dist)
    cp.orth_chol(order, dist)
Example #20
import chaospy
import chaospy as cp
import pandas as pd
import numpy as np

QUAD_ORDER = 18
quad = False


def f(x, y):
    return (1 - x)**2 * 10 * (y - x**2)**2


distribution = chaospy.J(chaospy.Normal(0, 1), chaospy.Normal(0, 1))

if quad:
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    X, W = chaospy.generate_quadrature(QUAD_ORDER, distribution, rule="G")
    evals = [f(x[0], x[1]) for x in X.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, X, W, evals)
else:
    dat = pd.read_csv('./dakota_tabular.dat', sep=r'\s+')
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    samples = np.array([dat.x1, dat.x2])
    evals = dat.response_fn_1
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)

total = chaospy.descriptives.sensitivity.total.Sens_t(foo_approx, distribution)
main = chaospy.descriptives.sensitivity.main.Sens_m(foo_approx, distribution)
Example #21
File: pce.py · Project: rocdat/EasyVVUQ
    def __init__(self,
                 vary=None,
                 count=0,
                 polynomial_order=4,
                 quadrature_rule="G",
                 sparse=False,
                 growth=None):
        """
        Create the sampler for the Polynomial Chaos Expansion method using
        pseudo-spectral projection.

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.

        count : int, optional
            Specified counter for Fast forward, default is 0.

        polynomial_order : int, optional
            The polynomial order, default is 4.

        quadrature_rule : char, optional
            The quadrature method, default is Gaussian "G".

        sparse : bool, optional
            If True, use Smolyak sparse grid instead of normal tensor product
            grid. Default value is False.

        growth : bool or None, optional
            If True, quadrature points become nested for sparse grids.
            If omitted, defaults to True when ``sparse`` is True and to None otherwise.
        """

        if vary is None:
            msg = ("'vary' cannot be None. RandomSampler must be passed a "
                   "dict of the names of the parameters you want to vary, "
                   "and their corresponding distributions.")
            logging.error(msg)
            raise Exception(msg)
        if not isinstance(vary, dict):
            msg = ("'vary' must be a dictionary of the names of the "
                   "parameters you want to vary, and their corresponding "
                   "distributions.")
            logging.error(msg)
            raise Exception(msg)
        if len(vary) == 0:
            msg = "'vary' cannot be empty."
            logging.error(msg)
            raise Exception(msg)

        self.vary = Vary(vary)
        self.polynomial_order = polynomial_order

        # List of the probability distributions of uncertain parameters
        params_distribution = list(vary.values())

        # Multivariate distribution
        self.distribution = cp.J(*params_distribution)

        # The orthogonal polynomials corresponding to the joint distribution
        self.P = cp.orth_ttr(polynomial_order, self.distribution)

        # The quadrature information: order, rule and sparsity
        self.quad_order = polynomial_order + 1
        self.quad_rule = quadrature_rule
        self.quad_sparse = sparse
        if sparse:
            self.quad_growth = True
        else:
            self.quad_growth = growth

        # Nodes and weights for the integration
        self._nodes, _ = cp.generate_quadrature(order=self.quad_order,
                                                dist=self.distribution,
                                                rule=quadrature_rule,
                                                sparse=sparse,
                                                growth=self.quad_growth)

        # Number of samples
        self._number_of_samples = len(self._nodes[0])

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._number_of_samples:
            msg = (
                f"Attempt to start sampler fastforwarded to count {self.count}, "
                f"but sampler only has {self._number_of_samples} samples, therefore "
                f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
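
The same kind of hypothetical usage for this projection-only variant, again assuming an EasyVVUQ-style PCESampler class; here the Smolyak sparse grid described in the docstring is requested.

import chaospy as cp
import easyvvuq as uq

vary = {"a": cp.Normal(0.0, 1.0), "b": cp.Uniform(0.0, 1.0)}
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=4,
                                 quadrature_rule="C", sparse=True)  # nested Clenshaw-Curtis points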
Example #22
uq = profit.UQ(yaml='uq.yaml')
distribution = cp.J(*uq.params.values())
sparse = uq.backend.sparse
if sparse:
    order = 2 * 3
else:
    order = 3 + 1

# actually start the postprocessing now:

nodes, weights = cp.generate_quadrature(order,
                                        distribution,
                                        rule='G',
                                        sparse=sparse)
expansion, norms = cp.orth_ttr(3, distribution, retall=True)
approx_denit = cp.fit_quadrature(expansion, nodes, weights,
                                 np.mean(data[:, 1, :], axis=1))
approx_oxy = cp.fit_quadrature(expansion, nodes, weights,
                               np.mean(data[:, 0, :], axis=1))

annual_oxy = cp.fit_quadrature(expansion, nodes, weights, data[:, 0, :])
annual_denit = cp.fit_quadrature(expansion, nodes, weights, data[:, 1, :])

s_denit = cp.descriptives.sensitivity.Sens_m(annual_denit, distribution)
s_oxy = cp.descriptives.sensitivity.Sens_m(annual_oxy, distribution)

df_oxy = cp.Std(annual_oxy, distribution)
df_denit = cp.Std(annual_denit, distribution)
f0_oxy = cp.E(annual_oxy, distribution)
f0_denit = cp.E(annual_denit, distribution)
Example #23
labels = data['f0']  # parameter labels
mean = data['f2']  # E0
std = data['f3']  # sqrt(Var0)

s = sqrt(log(std**2 / mean**2 + 1))
mu = log(mean) - 0.5 * s**2

params = []
for k in range(len(mu)):
    params.append(Normal(mu=mu[k], sigma=s[k]))

dist = J(*params)
#%%

nodes, weights = generate_quadrature(4, dist, rule='G', sparse=True)
expansion = orth_ttr(3, dist)

#%%
approx = fit_quadrature(expansion, nodes, weights, outdata)

#%%

F0 = E(approx, dist)
dF = Std(approx, dist)

#%%
plt.figure(figsize=(6, 3))
plt.plot(F0, 'k')
plt.fill_between(range(len(F0)), F0 - 1.96 * dF, F0 + 1.96 * dF,
                 alpha=0.2)  # 95% CI
plt.fill_between(range(len(F0)), F0 - 0.67 * dF, F0 + 0.67 * dF,
Example #24
File: test_orth.py · Project: flo2k/chaospy
def test_orth_chol():
    dist = cp.Normal(0, 1)
    orth1 = cp.orth_ttr(5, dist, normed=True)
    orth2 = cp.orth_chol(5, dist, normed=True)
    eps = cp.sum((orth1-orth2)**2)
    assert np.allclose(eps(np.linspace(-100, 100, 5)), 0)
Example #25
File: test_orth.py · Project: flo2k/chaospy
def test_orth_norms():
    dist = cp.Normal(0, 1)
    orth = cp.orth_ttr(5, dist, normed=True)
    norms = cp.E(orth**2, dist)
    assert np.allclose(norms, 1)
Example #26
        eps65 = cp.Normal(mu = mu, sigma = sigma)

        joint_KL = cp.J(eps1,eps2,eps3,eps4,eps5,eps6,eps7,eps8,eps9,eps10)
                       # eps11,eps12,eps13,eps14,eps15,eps16,eps17,eps18,eps19,eps20,
                       # eps21,eps22,eps23,eps24,eps25,eps26,eps27,eps28,eps29,eps30,
                       # eps31,eps32,eps33,eps34,eps35,eps36,eps37,eps38,eps39,eps40)
                       # eps41,eps42,eps43,eps44,eps45,eps46,eps47,eps48,eps49,eps50,
                       # eps51,eps52,eps53,eps54,eps55,eps56,eps57,eps58,eps59,eps60,
                       # eps61,eps62,eps63,eps64,eps65)

        # Number of terms in karhunen-loeve expansion
        nkl = 10

        # Polynomial Chaos Expansion: three-term recurrence relation
        order = 1
        Polynomials = cp.orth_ttr(order, joint_KL)

        # Generate nodes by quasi-random sampling
        qmc_scheme = "H"
        nnodes = 2*len(Polynomials)
        nodes = joint_KL.sample(nnodes, qmc_scheme)

        # Regression method in Point Collocation
        rgm_rule ="T" #Orthogonal Matching Pursuit#"LS"


        # Random Field
        p , l = np.inf, 1
        k = nkl
        kernel = Matern(p = p,l = l)    #0,1Exponential covariance kernel
        kle  = KLE(model.mesh, kernel, verbose = True)
Example #27
    return I * numpy.exp(-a * x)


x = numpy.linspace(0, 10, 1000)

# Defining the random input distributions:
a = cp.Uniform(0, 0.1)
I = cp.Uniform(8, 10)
dist = cp.J(a, I)

num_tests = 100
order = 4

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1,
                                        dist,
                                        rule="G",
                                        sparse=False)
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(x, s[0], s[1]) for s in test_inputs.T])
surrogate_test_outputs = numpy.array(
    [U_hat(s[0], s[1]) for s in test_inputs.T])

print "mean l2 error", numpy.mean(
    numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))
Example #28
    # create the multivariate distribution
    distr_5D = cp.J(distr_c, distr_k, distr_f, distr_y0, distr_y1)

    # quad deg 1D
    quad_deg_1D = 3
    poly_deg_1D = 3

    # time domain setup
    t_max = 20.
    dt = 0.01
    grid_size = int(t_max / dt) + 1
    t = np.array([i * dt for i in range(grid_size)])
    t_interest = len(t) // 2

    # create the orthogonal polynomials
    P = cp.orth_ttr(poly_deg_1D, distr_5D)

    #################### full grid computations #####################
    # get the non-sparse quadrature nodes and weight
    nodes_full, weights_full = cp.generate_quadrature(quad_deg_1D,
                                                      distr_5D,
                                                      rule='G',
                                                      sparse=False)
    # create vector to save the solution
    sol_odeint_full = np.zeros(len(nodes_full.T))

    # perform the full-grid pseudo-spectral approximation
    for j, n in enumerate(nodes_full.T):
        # each n is a vector with 5 components
        # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
        init_cond = n[3], n[4]
Example #29
import chaospy as cp
import numpy as np
import odespy


#Intrusive Galerkin method

dist_a = cp.Uniform(0, 0.1)
dist_I = cp.Uniform(8, 10)
dist = cp.J(dist_a, dist_I) # joint multivariate dist

P, norms = cp.orth_ttr(2, dist, retall=True)
variable_a, variable_I = cp.variable(2)

PP = cp.outer(P, P)
E_aPP = cp.E(variable_a*PP, dist)
E_IP = cp.E(variable_I*P, dist)

def right_hand_side(c, x):            # c' = right_hand_side(c, x)
    return -np.dot(E_aPP, c)/norms    # -M*c

initial_condition = E_IP/norms
solver = odespy.RK4(right_hand_side)
solver.set_initial_condition(initial_condition)
x = np.linspace(0, 10, 1000)
c = solver.solve(x)[0]
u_hat = cp.dot(P, c)



# Rosenblatt transformation using point collocation
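
The heading above is left dangling in the source. A rough sketch of what point collocation through the Rosenblatt transformation usually looks like with chaospy follows; it reuses x, dist_a and dist_I from this snippet, the explicit model u is an assumption, and the transformation is really meant for a dependent joint distribution Q (the independent one stands in here just to show the mechanics).

def u(x, a, I):                                # assumed exponential-decay model
    return I * np.exp(-a * x)

Q = cp.J(dist_a, dist_I)                       # "physical" joint (could be dependent)
R = cp.J(cp.Uniform(0, 1), cp.Uniform(0, 1))   # independent auxiliary distribution

P = cp.orth_ttr(2, R)                          # polynomials orthogonal w.r.t. R, not Q
nodes_R = R.sample(2 * len(P), "M")            # Hammersley samples in R-space
nodes_Q = Q.inv(R.fwd(nodes_R))                # Rosenblatt map from R-space to Q-space
solves = [u(x, s[0], s[1]) for s in nodes_Q.T]
u_hat_rosenblatt = cp.fit_regression(P, nodes_R, solves, rule="T")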
Example #30
plt.plot(indata['Rf'], outdata[:, 0], 'x')
plt.xlabel('Rf')

plt.figure()
plt.plot(indata['alfa'], indata['Rf'], 'x')
plt.xlabel('alpha')
plt.ylabel('Rf')

#%%

distribution = J(*uq.params.values())
nodes, weights = generate_quadrature(uq.backend.order + 1,
                                     distribution,
                                     rule='G',
                                     sparse=True)
expansion = orth_ttr(uq.backend.order, distribution)
#%%
approx0 = fit_quadrature(expansion, nodes, weights, outdata[:, 0])
approxt = fit_quadrature(expansion, nodes, weights, outdata[:, 1:])
#%%

F0 = E(approx0, distribution)
dF = Std(approx0, distribution)
sobol1 = descriptives.sensitivity.Sens_m(approx0, distribution)
#sobolt = descriptives.sensitivity.Sens_t(approx0, distribution)
#sobol2 = descriptives.sensitivity.Sens_m2(approx0, distribution)

print('F = {} +- {}%'.format(F0, 100 * abs(dF / F0)))
print('1st order sensitivity indices:\n {}'.format(sobol1))
#print('Total order sensitivity indices:\n {}'.format(sobolt))
#print('2nd order sensitivity indices:\n {}'.format(sobol2))
Example #31
File: ode.py · Project: apetcho/chaospy
c2 = cp.Uniform(0.03, 0.07)
# Joint probability distribution
distribution = cp.J(c0, c1, c2)

# Create 3rd order quadrature scheme
nodes, weights = cp.generate_quadrature(
    order=3, domain=distribution, rule="Gaussian")

u0 = 0.3
# Evaluate model at the nodes
x = np.linspace(0, 1, 101)
samples = [model(x, u0, node[0], node[1], node[2])
           for node in nodes.T]

# Generate 3rd order orthogonal polynomial expansion
polynomials = cp.orth_ttr(order=3, dist=distribution)

# Create model approximation (surrogate solver)
model_approx = cp.fit_quadrature(
               polynomials, nodes, weights, samples)

# Model analysis
mean = cp.E(model_approx, distribution)
deviation = cp.Std(model_approx, distribution)

# Plot results
from matplotlib import pyplot as plt
plt.rc("figure", figsize=[8,6])
plt.fill_between(x, mean-deviation, mean+deviation, color="k",
        alpha=0.5)
plt.plot(x, mean, "k", lw=2)
Example #32
import chaospy as cp
import numpy as np
import odespy

#Intrusive Galerkin method

dist_a = cp.Uniform(0, 0.1)
dist_I = cp.Uniform(8, 10)
dist = cp.J(dist_a, dist_I)  # joint multivariate dist

P, norms = cp.orth_ttr(2, dist, retall=True)
variable_a, variable_I = cp.variable(2)

PP = cp.outer(P, P)
E_aPP = cp.E(variable_a * PP, dist)
E_IP = cp.E(variable_I * P, dist)


def right_hand_side(c, x):  # c' = right_hand_side(c, x)
    return -np.dot(E_aPP, c) / norms  # -M*c


initial_condition = E_IP / norms
solver = odespy.RK4(right_hand_side)
solver.set_initial_condition(initial_condition)
x = np.linspace(0, 10, 1000)
c = solver.solve(x)[0]
u_hat = cp.dot(P, c)

# Rosenblatt transformation using point collocation
Example #33
yaw_1_train_scaled = Input_MZ_MY_Turb2[0][I1*I2]/30
yaw_2_train_scaled = Input_MZ_MY_Turb2[1][I1*I2]/30
Distance_train_scaled = Input_MZ_MY_Turb2[2][I1*I2]/np.max(Input_MZ_MY_Turb2[2])
MZ_MY_scaled = Input_MZ_MY_Turb2[3][I1*I2]/np.max(Input_MZ_MY_Turb2[3])

Input_MZ_MY_Turb2 = None
Input_MZ_MY_Turb2 = [yaw_1_train_scaled, yaw_2_train_scaled, Distance_train_scaled, MZ_MY_scaled]

Dist_max = np.max(Input_Power2[2])

########## Creating the individual surrogate models for the power and the DEL of the upstream and downstream turbine ###############################
####################################################################################################################################################

# Creating the surrogate model for the power of the upstream turbine
distribution1 = cp.J(cp.Normal(0, 4.95/30))
orthogonal_expansion_ttr1 = cp.orth_ttr(3, distribution1 ) 

Matrix = []
Input = Input_Power1;
for drand in range(0, 9):
    I_rand = random.sample(range(1, len(Input[0])), int(0.9*len(Input[0])))
    approx_model_ttr_Power1  = cp.fit_regression(orthogonal_expansion_ttr1, [Input[0][I_rand]],Input[1][I_rand])
    Coefs = []
    for dq in range(0, len(approx_model_ttr_Power1.keys)):
        a = approx_model_ttr_Power1.A[approx_model_ttr_Power1.keys[dq]]
        Coefs.append(a.tolist())
    Matrix.append(Coefs)
Coefs = np.mean(Matrix, axis = 0)
for dq in range(0, len(approx_model_ttr_Power1.keys)):
    approx_model_ttr_Power1.A[approx_model_ttr_Power1.keys[dq]] = Coefs[dq]
Example #34
    Ns_mc = 1000000
    # calculate sensitivity indices
    A_s, B_s, C_s, f_A, f_B, f_C, S_mc, ST_mc = mc_sensitivity_linear(Ns_mc, jpdf, w)

    Sensitivities=np.column_stack((S_mc,s**2))
    row_labels= ['S_'+str(idx) for idx in range(1,Nrv+1)]
    print("First Order Indices")
    print(pd.DataFrame(Sensitivities,columns=['Smc','Sa'],index=row_labels).round(3))
    # end Monte Carlo

    # Polychaos computations
    Ns_pc = 80
    samples_pc = jpdf.sample(Ns_pc)
    polynomial_order = 4
    poly = cp.orth_ttr(polynomial_order, jpdf)
    Y_pc = linear_model(w, samples_pc.T)
    approx = cp.fit_regression(poly, samples_pc, Y_pc, rule="T")

    exp_pc = cp.E(approx, jpdf)
    std_pc = cp.Std(approx, jpdf)
    print("Statistics polynomial chaos\n")
    print('\n        E(Y)  |  std(Y) \n')
    print('pc  : {:2.5f} | {:2.5f}'.format(float(exp_pc), std_pc))
    
    
    S_pc = cp.Sens_m(approx, jpdf)

    Sensitivities=np.column_stack((S_mc,S_pc, s**2))
    print("\nFirst Order Indices")
    print(pd.DataFrame(Sensitivities,columns=['Smc','Spc','Sa'],index=row_labels).round(3))
Example #35
File: gpc.py · Project: ionutfarcas/UQ_tests
	def get_orth_poly(self, poly_deg, dist):
		P = cp.orth_ttr(poly_deg, dist, normed=True)

		return P
Example #36
# The model solver
def u(z):
    return campaspe_toy.run(*z, nrow=10)


# Defining the random input distributions:
dists = [cp.Uniform(0, 0.5) for i in range(3)]
dists.append(cp.Uniform(-200.0, 50.0))
dist = cp.J(*dists)

num_tests = 100
order = 3

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1, dist, rule="G", sparse=False)
# solves = [u(s) for s in nodes.T]
solves = parmap(u, nodes.T)  # [u(s) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(s) for s in test_inputs.T])
surrogate_test_outputs = numpy.array([U_hat(*s) for s in test_inputs.T])

print "mean l2 error", numpy.mean(numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))

# scatter for all QOI
num_qoi = test_outputs.shape[1]
for qoi_i in range(num_qoi):
Example #37
                                                    errorOperator2,
                                                    10**-10,
                                                    do_plot=False)
nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights()
print("Number of points:", len(nodes))
print("Sum of weights:", sum(weights))
weights = np.asarray(weights) * 1.0 / sum(weights)
nodes_transpose = list(zip(*nodes))

#################################################################################################
# propagate the uncertainty
value_of_interests = [model(node) for node in nodes]
value_of_interests = np.asarray(value_of_interests)
print("Mean", np.inner(weights, value_of_interests))
#################################################################################################
# generate orthogonal polynomials for the distribution
OP = cp.orth_ttr(3, dist)

#################################################################################################
# generate the general polynomial chaos expansion polynomial
gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests)

#################################################################################################
# calculate statistics
E = cp.E(gPCE, dist)
StdDev = cp.Std(gPCE, dist)

# print the statistics
print("mean: %f" % E)
print("stddev: %f" % StdDev)
Example #38
I = cp.Uniform(8, 10)
dist = cp.J(a, I)


## Monte Carlo integration
samples = dist.sample(10**5)
u_mc = [u(x, *s) for s in samples.T]

mean = np.mean(u_mc, 1)
var = np.var(u_mc, 1)


## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
order = 5
P, norms = cp.orth_ttr(order, dist, retall=True)
nodes, weights = cp.generate_quadrature(order+1, dist, rule="G")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

mean = cp.E(U_hat, dist)
var = cp.Var(U_hat, dist)


## Polynomial chaos expansion
## using Point collocation method and quasi-random samples
order = 5
P = cp.orth_ttr(order, dist)
nodes = dist.sample(2*len(P), "M")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")
Example #39
def test_descriptives():
    dist = cp.Iid(cp.Normal(), dim)
    orth = cp.orth_ttr(order, dist)
    cp.E(orth, dist)
    cp.Var(orth, dist)
    cp.Cov(orth, dist)
Example #40
    def test_circuit_model_order_2(self, order_cp: int = 2, bool_plot: bool = False):
        dim = 6
        key = ["R_b1", "R_b2", "R_f", "R_c1", "R_c2", "beta"]
        sobol_indices_quad_constantine = np.array(
            [5.0014515064e-01, 4.1167859899e-01, 7.4006053045e-02, 2.1802568214e-02, 5.1736552010e-08,
             1.4938996627e-05])

        M_constantine = np.array([50, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4])
        sobol_indices_error_constantine = np.transpose(np.array(
            [[6.1114622870e-01, 2.7036543475e-01, 1.5466638009e-01, 1.2812367577e-01, 5.0229955234e-02,
              3.5420048253e-02, 1.4486328386e-02],
             [6.0074404490e-01, 3.2024096457e-01, 1.2296426366e-01, 9.6725945246e-02, 5.3143328175e-02,
              3.2748864016e-02, 1.1486316472e-02],
             [1.1789694228e-01, 4.6150927239e-02, 2.6268692965e-02, 1.8450563871e-02, 8.3656592318e-03,
              5.8550974309e-03, 2.8208921925e-03],
             [3.8013619286e-02, 1.6186288112e-02, 8.9893920304e-03, 6.3911249578e-03, 2.6219049423e-03,
              1.9215077698e-03, 9.5390224479e-04],
             [1.2340746448e-07, 4.8204289233e-08, 3.0780845307e-08, 2.5240466147e-08, 1.0551377101e-08,
              6.9506139894e-09, 3.3372151408e-09],
             [3.4241277775e-05, 1.8074628532e-05, 7.1554659714e-06, 5.0303467614e-06, 2.7593313990e-06,
              1.9529470403e-06, 7.2840043686e-07]]))

        x_lower = np.array([50, 25, 0.5, 1.2, 0.25, 50])  # table 3, constantine-2017
        x_upper = np.array([150, 70, 3.0, 2.5, 1.2, 300])  # table 3, constantine-2017

        circuit_model = CircuitModel()
        n_samples = M_constantine
        iN_vec = n_samples.astype(int)  # 8 if calc_second_order = False, else 14

        no_runs = np.zeros(len(iN_vec))
        indices = np.zeros(shape=(len(iN_vec), dim))
        indices_error = np.zeros(shape=(len(iN_vec), dim))
        idx = 0
        n_trials = 1
        no_runs_averaged = 1

        dist = cp.J(cp.Uniform(x_lower[0], x_upper[0]), cp.Uniform(x_lower[1], x_upper[1]),
                    cp.Uniform(x_lower[2], x_upper[2]), cp.Uniform(x_lower[3], x_upper[3]),
                    cp.Uniform(x_lower[4], x_upper[4]), cp.Uniform(x_lower[5], x_upper[5]))

        for iN in iN_vec:
            tmp_indices_error_av = np.zeros(dim)
            for i_trial in range(0, n_trials):
                seed = int(np.random.rand(1) * 2 ** 32 - 1)
                random_state = RandomState(seed)

                # https://github.com/jonathf/chaospy/issues/81

                dist_samples = dist.sample(iN)  # random samples or abscissas of polynomials ?
                values_f, _, _ = circuit_model.eval_model_averaged(dist_samples, no_runs_averaged,
                                                                   random_state=random_state)
                # Approximation with Chaospy
                poly = cp.orth_ttr(order_cp, dist)
                approx_model = cp.fit_regression(poly, dist_samples, values_f)
                tmp_indices_total = cp.Sens_t(approx_model, dist)

                tmp_error = relative_error_constantine_2017(tmp_indices_total, sobol_indices_quad_constantine)
                tmp_indices_error_av = tmp_indices_error_av + tmp_error
                print(iN)

            indices_error[idx, :] = tmp_indices_error_av / n_trials
            indices[idx, :] = tmp_indices_total
            no_runs[idx] = iN
            idx = idx + 1

        if bool_plot:
            col = np.array(
                [[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980], [0.9290, 0.6940, 0.1250], [0.4940, 0.1840, 0.5560],
                 [0.4660, 0.6740, 0.1880], [0.3010, 0.7450, 0.9330]])

            plt.figure()
            for i in range(0, dim):
                plt.semilogx(no_runs, indices[:, i], '.--', label='%s (SALib)' % key[i], color=col[i, :])
                plt.semilogx([no_runs[0], max(no_runs)], sobol_indices_quad_constantine[i] * np.ones(2), 'k:',
                             label='Reference values', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Sobol\' total indices')
            plt.legend()

            plt.figure()
            for i in range(0, dim):
                plt.loglog(no_runs, indices_error[:, i], '.--', label=key[i]+'(PC Approximation)', color=col[i, :])
                plt.loglog(M_constantine, sobol_indices_error_constantine[:, i], '.k:', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Relative error (Sobol\' total indices)')
            plt.grid(True, 'minor', 'both')

            plt.legend()
            plt.show()

        # ensure the test ran to completion
        assert True