Example #1
    def compute_1D_points_weights(self, L, N):
        """
        Computes 1D collocation points and quad weights,
        and stores this in self.xi_1d, self.wi_1d.

        Parameters
        ----------
        L : (int) the max level of the (sparse) grid
        N : (int) the number of uncertain parameters

        Returns
        -------
        None.

        """
        # for every dimension (parameter), create a hierarchy of 1D
        # quadrature rules of increasing order
        self.xi_1d = [{} for n in range(N)]
        self.wi_1d = [{} for n in range(N)]

        if self.sparse:

            # if level one of the sparse grid is a midpoint rule, generate
            # the quadrature with order 0 (1 quad point). Else set order at
            # level 1 to 1
            if self.midpoint_level1:
                j = 0
            else:
                j = 1

            for n in range(N):
                for i in range(L):
                    xi_i, wi_i = cp.generate_quadrature(
                        i + j,
                        self.params_distribution[n],
                        rule=self.quad_rule,
                        growth=self.growth)

                    self.xi_1d[n][i + 1] = xi_i[0]
                    self.wi_1d[n][i + 1] = wi_i
        else:
            for n in range(N):
                xi_i, wi_i = cp.generate_quadrature(
                    self.polynomial_order[n],
                    self.params_distribution[n],
                    rule=self.quad_rule,
                    growth=self.growth)

                self.xi_1d[n][self.polynomial_order[n]] = xi_i[0]
                self.wi_1d[n][self.polynomial_order[n]] = wi_i
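A note on the return shape relied on above: cp.generate_quadrature returns (abscissas, weights) with abscissas of shape (dim, n_points), which is why the 1D nodes are unpacked as xi_i[0]. A minimal sketch, assuming only chaospy and a 1D distribution:

import chaospy as cp

dist = cp.Normal(0, 1)
nodes, weights = cp.generate_quadrature(3, dist, rule="gaussian")
# nodes has shape (1, 4): one row per dimension, order 3 -> 4 points
print(nodes.shape, weights.shape)
print(nodes[0])  # the 1D abscissas, as stored in xi_1d above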
Example #2
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    t1 = cp.Uniform(0, 150)
    t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  #No good for dependent
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if True:  # quadrature-projection branch; the else branch fits by regression
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
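This snippet targets the legacy Python 2 / early-chaospy API; in current chaospy, orth_ttr has been superseded by generate_expansion, and the distribution is passed positionally rather than via domain=. A rough modern sketch of the projection branch (hedged, not a drop-in replacement):

import numpy as np
import chaospy as cp

pdf = cp.J(cp.Normal(70, 1), cp.Normal(115, 1))
polynomials = cp.generate_expansion(2, pdf)
nodes, weights = cp.generate_quadrature(2, pdf, rule="gaussian")
# samples = np.array([hep(t, *node) for node in nodes.T])
# hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)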
Example #3
def _construct_lookup(
    orders,
    dists,
    growth,
    recurrence_algorithm,
    rules,
    tolerance,
    scaling,
    n_max,
):
    """
    Create abscissas and weights look-up table so values do not need to be
    re-calculated on the fly.
    """
    x_lookup = []
    w_lookup = []
    for max_order, dist, rule in zip(orders, dists, rules):
        x_lookup.append([])
        w_lookup.append([])
        for order in range(max_order + 1):
            (abscissas, ), weights = chaospy.generate_quadrature(
                order=order,
                dist=dist,
                growth=growth,
                recurrence_algorithm=recurrence_algorithm,
                rule=rule,
                tolerance=tolerance,
                scaling=scaling,
                n_max=n_max,
            )
            x_lookup[-1].append(abscissas)
            w_lookup[-1].append(weights)
    return x_lookup, w_lookup
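The resulting tables are indexed as x_lookup[dim][order], so later sparse-grid code can fetch any 1D rule without recomputation. A hedged usage sketch, passing the same keyword set the helper forwards to chaospy.generate_quadrature:

import chaospy

dists = [chaospy.Uniform(0, 1), chaospy.Normal(0, 1)]
x_lookup, w_lookup = _construct_lookup(
    orders=[3, 3], dists=dists, growth=False,
    recurrence_algorithm="stieltjes", rules=["gaussian", "gaussian"],
    tolerance=1e-10, scaling=3, n_max=5000,
)
abscissas = x_lookup[0][2]  # order-2 abscissas for the first dimension
weights = w_lookup[0][2]    # matching weights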
Example #4
    def fit(self, high_fidelity, num_evals=None, quadrature_rule='gaussian'):
        """Fits the low-fidelity surrogate via Polynomial Chaos Expansion.

        Parameters
        ----------
        high_fidelity : HighFidelityModel
            Model that we want to approximate through the low-fidelity.
        num_evals : int, default None
            Parameter provided for consistency, the actual number of evaluations is determined by the
            quadrature rule.
        quadrature_rule: str, default 'gaussian'
            Rule used for the quadrature (passed to chaospy.generate_quadrature).
        """
        abscissae, weights = cpy.generate_quadrature(self.degree,
                                                     self.prior,
                                                     rule=quadrature_rule)
        self.expansion = generate_expansion(self.degree,
                                            self.prior,
                                            retall=False)
        widgets = [
            'fit\t',
            pb.Percentage(), ' ',
            pb.Bar('='), ' ',
            pb.AdaptiveETA(), ' - ',
            pb.Timer()
        ]
        bar = pb.ProgressBar(maxval=abscissae.T.shape[0], widgets=widgets)
        evals = []
        bar.start()
        for i, z_ in enumerate(abscissae.T):
            evals.append(high_fidelity.eval(z_))
            bar.update(i + 1)
        self.proxy = cpy.fit_quadrature(self.expansion, abscissae, weights,
                                        evals)
        self._fit = True
Example #5
    def compute_SC_weights(self, rule):
        """
        Computes the 1D quadrature weights w_j of the SC expansion:

            w_j = int L_j(x)p(x) dx                             (1)

        Here L_j is a Lagrange polynomial of the SC expansion.

        Parameters
        ----------
        - rule ("str"): chaospy quadrature rule used to compute (1),


        Returns
        -------
        - wi_1d (dict): wi_1d[n][l] gives an array
          of quadrature weights for the n-th parameter at level l.

          IMPORTANT:
          If rule is the same as the rule used to compute the SC
          collocation points, these weights will equal the weights
          computed by chaospy, since L_j(x_k) = 1 when j=k and 0
          for the rest. This is the default setting.
        """

        # no need to recompute weights
        if rule == self.sampler.quadrature_rule:
            return self.sampler.wi_1d
        # recompute weights - generally not used
        else:
            wi_1d = {}

            params = self.sampler.params_distribution

            for n in range(self.N):
                # 1d weights for n-th parameter
                wi_1d[n] = {}
                # loop over all level of collocation method
                for level in range(1, self.L + 1):
                    # current SC nodes over dimension n and level
                    xi_1d = self.xi_1d[n][level]
                    wi_1d[n][level] = np.zeros(xi_1d.size)

                    # generate a quadrature rule to compute the SC weights
                    xi_quad, wi_quad = cp.generate_quadrature(level, params[n], rule=rule)
                    xi_quad = xi_quad[0]

                    # compute integral of the lagrange polynomial through xi_1d, weighted
                    # by the input distributions:
                    # w_j = int L_j(xi) p(xi) dxi j = 1,..,xi_1d.size
                    for j in range(xi_1d.size):
                        # values of L_i(xi_quad)
                        lagrange_quad = np.zeros(xi_quad.size)
                        for i in range(xi_quad.size):
                            lagrange_quad[i] = lagrange_poly(xi_quad[i], xi_1d, j)
                        # quadrature
                        wi_1d[n][level][j] = np.sum(lagrange_quad * wi_quad)

            return wi_1d
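lagrange_poly is used above but not shown. A minimal stand-in consistent with the call signature lagrange_poly(x, nodes, j) seen above (an assumption about the real helper) evaluates the j-th Lagrange basis polynomial through the collocation nodes:

import numpy as np

def lagrange_poly(x, x_i, j):
    """Evaluate L_j(x) = prod_{m != j} (x - x_m) / (x_j - x_m)."""
    x_i = np.asarray(x_i)
    mask = np.arange(x_i.size) != j
    return np.prod((x - x_i[mask]) / (x_i[j] - x_i[mask]))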
Example #6
def fit():
    nodes, weights = generate_quadrature(4,
                                         distribution,
                                         rule='G',
                                         sparse=False)
    print(np.max(nodes - indata.T))
    expansion = orth_ttr(3, distribution)
    return fit_quadrature(expansion, nodes, weights, outdata)
Example #7
def approximate_moment(distribution,
                       k_loc,
                       order=None,
                       rule="fejer",
                       **kwargs):
    """
    Approximation method for estimation of raw statistical moments.

    Uses quadrature integration to estimate the values.

    Args:
        distribution (Distribution):
            Distribution domain with dim=len(distribution)
        k_loc (Sequence[int, ...]):
            The exponents of the moments of interest with ``shape == (dim,)``.
        order (int):
            The quadrature order used in approximation. If omitted, calculated
            to be ``1000/log2(len(distribution)+1)``.
        rule (str):
            Quadrature rule for integrating moments.
        kwargs:
            Extra args passed to `chaospy.generate_quadrature`.

    Examples:
        >>> distribution = chaospy.Uniform(1, 4)
        >>> round(chaospy.approximate_moment(distribution, (1,)), 4)
        2.5
        >>> round(chaospy.approximate_moment(distribution, (2,)), 4)
        7.0

    """
    if order is None:
        order = int(1000. / numpy.log2(len(distribution) + 1))
    assert isinstance(order, int)
    assert isinstance(distribution, chaospy.Distribution)
    k_loc = tuple(numpy.asarray(k_loc).tolist())
    assert len(k_loc) == len(distribution), "incorrect size of exponents"
    assert all([isinstance(k, int) for k in k_loc
                ]), ("exponents must be integers: %s found" % type(k_loc[0]))

    if (distribution, order) not in MOMENTS_QUADS:
        MOMENTS_QUADS[distribution,
                      order] = chaospy.generate_quadrature(order,
                                                           distribution,
                                                           rule=rule,
                                                           **kwargs)
    X, W = MOMENTS_QUADS[distribution, order]

    if k_loc in distribution._mom_cache:
        return distribution._mom_cache[k_loc]

    out = float(numpy.sum(numpy.prod(X.T**k_loc, 1) * W))
    distribution._mom_cache[k_loc] = out
    return out
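The doctest values can be verified by hand: for X ~ Uniform(1, 4), E[X] = (1 + 4)/2 = 2.5 and E[X^2] = (1/3) * int_1^4 x^2 dx = (64 - 1)/9 = 7. A direct quadrature sketch reproducing the second moment:

import numpy
import chaospy

distribution = chaospy.Uniform(1, 4)
X, W = chaospy.generate_quadrature(4, distribution, rule="gaussian")
print(numpy.sum(X[0]**2 * W))  # ~7.0, matching approximate_moment(distribution, (2,))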
Example #8
 def __init__(self,
              function: callable,
              distribution: cp.distributions,
              polynomial_order=8,
              quadrature_order=8):
      self.distribution = distribution
      self.polynomial_order = polynomial_order
      self.quadrature_order = quadrature_order
     self.quad_points, self.quad_weights = cp.generate_quadrature(
         self.quadrature_order, self.distribution, rule="gaussian")
     self.polynomial_expansion = cp.generate_expansion(
         self.polynomial_order, self.distribution)
     self.f_approx = None
     super().__init__(function)
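The constructor above leaves f_approx as None; a hedged sketch of the fitting step that would typically follow, assuming self.function holds the wrapped callable and maps one node to one output:

def fit(self):
    # evaluate the model at each quadrature node (nodes are columns)
    evals = np.array([self.function(node) for node in self.quad_points.T])
    # project onto the orthogonal expansion to obtain the PCE surrogate
    self.f_approx = cp.fit_quadrature(
        self.polynomial_expansion, self.quad_points,
        self.quad_weights, evals)
    return self.f_approx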
Example #9
def SRPCostRS(p, sim, pdf):

    polynomials = cp.orth_ttr(order=2, dist=pdf)
    samples, weights = cp.generate_quadrature(order=2,
                                              domain=pdf,
                                              rule="Gaussian")
    stateTensor = [SRPCost(p, sim, s) for s in samples.T]
    # stateTensor = pool.map(OptCost,samples.T)
    PCE = cp.fit_quadrature(polynomials, samples, weights, stateTensor)

    # print "PCE Expectation: {} ".format(cp.E(poly=PCE,dist=pdf))
    return cp.E(poly=PCE, dist=pdf)
Example #10
def make_quadrature():
    dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)

    nodes, weights = chaospy.generate_quadrature(2, dist, growth=True, rule="fejer", sparse=True)
    size = (weights*500).astype(int)
    indices = weights < 0

    pyplot.scatter(*nodes[:, indices], s=-size[indices], lw=3, color="w", edgecolors=COLOR2)
    pyplot.scatter(*nodes[:, indices], s=-size[indices], color=COLOR2, alpha=0.6)
    pyplot.scatter(*nodes[:, ~indices], s=size[~indices], lw=3, color="w", edgecolor=COLOR1)
    pyplot.scatter(*nodes[:, ~indices], s=size[~indices], color=COLOR1, alpha=0.6)

    save("quadrature")
Example #11
    def solve_nonlinear(self, params, unknowns, resids):

        power = params['power']
        method_dict = params['method_dict']
        dist = method_dict['distribution']
        rule = method_dict['rule']
        n = len(power)
        if rule != 'rectangle':
            points, weights = cp.generate_quadrature(order=n - 1,
                                                     domain=dist,
                                                     rule=rule)
        # else:
        #     points, weights = quadrature_rules.rectangle(n, method_dict['distribution'])

        poly = cp.orth_chol(n - 1, dist)
        # poly = cp.orth_bert(n-1, dist)
        # double check this is giving me good orthogonal polynomials.
        # print poly, '\n'
        p2 = cp.outer(poly, poly)
        # print 'chol', cp.E(p2, dist)
        norms = np.diagonal(cp.E(p2, dist))
        print('diag', norms)

        expansion, coeff = cp.fit_quadrature(poly,
                                             points,
                                             weights,
                                             power,
                                             retall=True,
                                             norms=norms)
        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)

        mean = cp.E(expansion, dist)
        print('mean cp.E =', mean)
        # mean = sum(power*weights)
        print('mean sum =', sum(power * weights))
        print('mean coeff =', coeff[0])
        std = cp.Std(expansion, dist)

        print(mean)
        print(std)
        print(np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:])))
        # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns['mean'] = mean * hours
        unknowns['std'] = std * hours

        print('In ChaospyStatistics')
Example #12
    def check_max_quad_level(self):
        """

        If a discrete variable is specified, collocation points can become
        non-unique once the quadrature order is high enough. This subroutine
        prevents that.

        NOTE: Only detects cp.DiscreteUniform thus far.

        The max quadrature levels are stored in self.max_level.

        Returns
        -------
        None

        """
        # assume no maximum by default
        self.max_level = np.ones(self.N) * 1000
        for n in range(self.N):

            # if a discrete uniform is specified check max order
            if isinstance(self.params_distribution[n], cp.DiscreteUniform):

                # if level one of the sparse grid is a midpoint rule, generate
                # the quadrature with order 0 (1 quad point). Else set order at
                # level 1 to 1
                if self.midpoint_level1:
                    j = 0
                else:
                    j = 1

                number_of_points = 0
                for order in range(1000):
                    xi_i, wi_i = cp.generate_quadrature(
                        order + j,
                        self.params_distribution[n],
                        growth=self.growth)
                    # if the quadrature points no longer grow with the quad order,
                    # then the max order has been reached
                    if xi_i.size == number_of_points:
                        break
                    number_of_points = xi_i.size

                logging.debug(
                    "Input %d is discrete, setting max quadrature order to %d"
                    % (n, order - 1))
                # level 1 = order 0 etc
                self.max_level[n] = order
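The saturation the loop detects can be reproduced directly: for a discrete distribution, chaospy stops producing new nodes once the whole support is in use. A hedged sketch:

import chaospy as cp

dist = cp.DiscreteUniform(1, 6)
for order in range(8):
    nodes, _ = cp.generate_quadrature(order, dist)
    print(order, nodes.size)  # the count stops growing at the support size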
Example #13
 def __init__(self, varsetInst, regen=True):
     self.varset = varsetInst.varset
     self.Nvar = varsetInst.Nvar
     self.jointDist = varsetInst.jointDist
     order = 3  #order of ortho polynomial
     if regen:
         self.polynomials = cp.orth_ttr(order, self.jointDist)
         self.nodes, self.weights = cp.generate_quadrature(order + 1,
                                                           self.jointDist,
                                                           rule="G",
                                                           sparse=True)
         pkl.dump([self.polynomials, self.nodes, self.weights],
                  open('pcecoeff.dump', 'wb'))
     else:
         [self.polynomials, self.nodes,
          self.weights] = pkl.load(open('pcecoeff.dump', 'rb'))
     self.nsamples = self.nodes.shape[1]
     self.co2Model = None
Example #14
def evaluate_postprocessing(distribution, data, expansion):
    import matplotlib.pyplot as plt
    from profit import read_input
    from chaospy import generate_quadrature, orth_ttr, fit_quadrature, E, Std, descriptives

    nodes, weights = generate_quadrature(uq.backend.order + 1,
                                         distribution,
                                         rule='G')
    expansion = orth_ttr(uq.backend.order, distribution)
    approx = fit_quadrature(expansion, nodes, weights,
                            np.mean(data[:, 0, :], axis=1))
    urange = list(uq.params.values())[0].range()
    vrange = list(uq.params.values())[1].range()
    u = np.linspace(urange[0], urange[1], 100)
    v = np.linspace(vrange[0], vrange[1], 100)
    U, V = np.meshgrid(u, v)
    c = approx(U, V)

    # for 3 parameters:
    #wrange = list(uq.params.values())[2].range()
    #w = np.linspace(wrange[0], wrange[1], 100)
    #W = 0.03*np.ones(U.shape)
    #c = approx(U,V,W)

    plt.figure()
    plt.contour(U, V, c, 20)
    plt.colorbar()
    plt.scatter(config.eval_points[0, :],
                config.eval_points[1, :],
                c=np.mean(data[:, 0, :], axis=1))

    plt.show()

    F0 = E(approx, distribution)
    dF = Std(approx, distribution)
    sobol1 = descriptives.sensitivity.Sens_m(approx, distribution)
    sobolt = descriptives.sensitivity.Sens_t(approx, distribution)
    sobol2 = descriptives.sensitivity.Sens_m2(approx, distribution)

    print('F = {} +- {}%'.format(F0, 100 * abs(dF / F0)))
    print('1st order sensitivity indices:\n {}'.format(sobol1))
    print('Total order sensitivity indices:\n {}'.format(sobolt))
    print('2nd order sensitivity indices:\n {}'.format(sobol2))
Example #15
def calculate_sobol_indices(quad_deg_1D, poly_deg_1D, joint_distr, sparse_bool,
                            title_names):
    nodes, weights = cp.generate_quadrature(quad_deg_1D,
                                            joint_distr,
                                            rule='G',
                                            sparse=sparse_bool)
    c, k, f, y0, y1 = nodes

    poly = cp.orth_ttr(poly_deg_1D, joint_distr, normed=True)

    y_out = [
        discretize_oscillator_odeint(model, atol, rtol, (y0_, y1_),
                                     (c_, k_, f_, w), t)[-1]
        for c_, k_, f_, y0_, y1_ in zip(c, k, f, y0, y1)
    ]

    # find generalized Polynomial chaos and expansion coefficients
    gPC_m, expansion_coeff = cp.fit_quadrature(poly,
                                               nodes,
                                               weights,
                                               y_out,
                                               retall=True)
    #print(f'The best polynomial of degree {poly_deg_1D} that approximates f(x): {cp.around(gPC_m, 1)}')
    # gPC_m is the polynomial that approximates the most
    print(
        f'Expansion coeff [0] (mean) for poly {poly_deg_1D} = {expansion_coeff[0]}'
    )  # , expect_weights: {expect_y}')
    #mu = cp.E(gPC_m, joint_distr)
    #print(f'Mean value from gPCE: {mu}')

    # Sobol indices
    first_order_Sobol_ind = cp.Sens_m(gPC_m, joint_distr)
    total_Sobol_ind = cp.Sens_t(gPC_m, joint_distr)

    print("The number of quadrature nodes for the grid is", len(nodes.T))
    print(f'The first order Sobol indices are \n {first_order_Sobol_ind}')
    print(f"The total Sobol' indices are \n {total_Sobol_ind}")

    plot_sobol_indices(first_order_Sobol_ind, title_names[0], False)
    plot_sobol_indices(total_Sobol_ind, title_names[1], False)

    return first_order_Sobol_ind, total_Sobol_ind
Example #16
    def solve_nonlinear(self, params, unknowns, resids):

        power = params["dirPowers"]
        method_dict = params["method_dict"]
        dist = method_dict["distribution"]
        n = len(power)
        points, weights = cp.generate_quadrature(order=n - 1, domain=dist, rule="G")
        poly = cp.orth_ttr(
            n - 1, dist
        )  # Think about the n-1 for 1d for 2d or more it would be n-2. Details Dakota reference manual quadrature order.
        # Double check if giving me orthogonal polynomials
        # p2 = cp.outer(poly, poly)
        # norms = np.diagonal(cp.E(p2, dist))
        # print 'diag', norms

        # expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True, norms=norms)
        expansion, coeff = cp.fit_quadrature(poly, points, weights, power, retall=True)
        # expansion, coeff = cp.fit_regression(poly, points, power, retall=True)

        mean = cp.E(expansion, dist, rule="G")
        # print 'mean cp.E =', mean
        # # mean = sum(power*weights)
        # print 'mean sum =', sum(power*weights)
        # print 'mean coeff =', coeff[0]*8760/1e6
        std = cp.Std(expansion, dist, rule="G")

        # print mean
        # print std
        # print np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # # std = np.sqrt(np.sum(coeff[1:]**2 * cp.E(poly**2, dist)[1:]))
        # number of hours in a year
        hours = 8760.0
        # promote statistics to class attribute
        unknowns["mean"] = mean * hours
        unknowns["std"] = std * hours

        # Modify the statistics to account for the truncation of the weibull (speed) case.
        modify_statistics(params, unknowns)  # It doesn't do anything for the direction case.

        print "In ChaospyStatistics"
Example #17
def getPointsSpeed(dist, method_dict, n):

    method = method_dict['method']
    bnd = dist.range()
    a = bnd[0]  # lower boundary
    b = bnd[1]  # upper boundary
    a = a[0]  # get rid of the list
    b = b[0]  # get rid of the list

    if method == 'rect':

        X = np.linspace(a, b, n+1)
        dx = X[1]-X[0]
        x = X[:-1]+dx/2  # Take the midpoints of the bins
        # Get the weights associated with the points locations
        w = []
        for i in range(n):
            w.append(dist._cdf(X[i+1]) - dist._cdf(X[i]))

        w = np.array(w).flatten()

    if method == 'dakota':

        x, f = generate_speed_abscissas_ordinates(a, b, dist)
        updateDakotaFile(method_dict, n, x, f)
        # run Dakota file to get the points locations
        x, w = getSamplePoints(method_dict['dakota_filename'])
        assert len(x) == 1, 'Should only be returning the speeds'
        x = np.array(x[0])

        # Rescale x
        x = (b-a)/2. + (b-a)/2.*x + a

    if method == 'chaospy':
        x, w = cp.generate_quadrature(n-1, dist, rule='G')
        x = x[0]

    return x, w
Example #18
def _criterion(rho_c, arg, distributions, order=15):
    """
    Evaluates the integral using a Gauss-Hermite rule in 2 dimensions.
    It requires the Cholesky decomposition of the covariance matrix in order to
    transform the integral properly.
    """
    cov = np.identity(2)
    cov[1, 0] = cov[0, 1] = rho_c

    chol = np.linalg.cholesky(cov)
    distribution = cp.Iid(cp.Normal(0, 1), 2)

    nodes, weights = cp.generate_quadrature(order,
                                            distribution,
                                            rule="gaussian")

    x_1, x_2 = np.split(chol @ nodes, 2)

    standard_norm_cdf = cp.Normal().cdf
    arg_1 = distributions[0].inv(standard_norm_cdf(x_1))
    arg_2 = distributions[1].inv(standard_norm_cdf(x_2))
    point = arg_1 * arg_2

    return (sum(point[0] * weights) - arg)**2
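_criterion is shaped for root finding over the Gaussian-copula correlation: minimizing it in rho_c recovers the correlation that reproduces a target product moment arg. A hedged usage sketch (the marginals and target are placeholders):

import chaospy as cp
from scipy.optimize import minimize_scalar

marginals = [cp.Uniform(0, 1), cp.Uniform(0, 1)]
target = 0.26  # desired E[X1 * X2]; 0.25 would correspond to independence here
result = minimize_scalar(_criterion, bounds=(-0.99, 0.99),
                         method="bounded", args=(target, marginals))
print(result.x)  # correlation of the underlying Gaussian copula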
Example #19
"""
Created: Fri Jul 26 09:57:16 2019
@author: Christopher Albert <*****@*****.**>
"""

import time

from chaospy import Normal, J, generate_quadrature

params = [Normal(mu=0.999,  sigma=0.0052), 
          Normal(mu=27.1,   sigma=17.0), 
          Normal(mu=0.318,  sigma=0.1),
          Normal(mu=0.015,  sigma=0.0087),
          Normal(mu=0.0817, sigma=0.0077),
          Normal(mu=1.309,  sigma=0.086),
          Normal(mu=2.19,   sigma=0.22)]

dist = J(*params)
#%%
t = time.time()
nodes, weights = generate_quadrature(4, dist, rule='G', sparse=True)
print('time elapsed: {} s'.format(time.time() - t))
Example #20
P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2*len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)




#Rosenblat transformation using pseudo spectral

def u(x,a, I):
    return I*np.exp(-a*x)

C = [[1,0.5],[0.5,1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R, weights_R = cp.generate_quadrature(3, dist_R)
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))
weights_Q = weights_R*dist_Q.pdf(nodes_Q)/dist_R.pdf(nodes_R)

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_quadrature(P, nodes_R, weights_Q, samples_u)
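The weight correction in the second block is an importance-reweighting step: nodes generated for the independent proxy dist_R are mapped through the inverse Rosenblatt transform into dist_Q, and the weights are rescaled by the density ratio dist_Q.pdf(nodes_Q) / dist_R.pdf(nodes_R). Since P is orthogonal with respect to dist_R, statistics of the fitted surrogate are then taken against dist_R; a brief hedged continuation:

# u_hat was fitted on R-space nodes, so moments use dist_R
mean = cp.E(u_hat, dist_R)
std = cp.Std(u_hat, dist_R)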
Example #21
def eval_integral(f, nodes, weights):
	integral = sum(f(nodes[0])*weights)

	return integral

def pdf(self, x):
	return kernel.pdf(x)

def cdf(self, x):
	return kernel.integrate_box1d(min(samples), x)

def bnd(self):
	return min(samples), max(samples)

if __name__ == '__main__':
	quad_deg = 4

	# standard approach
	dist_real 				= cp.Normal()
	nodes_std, weights_std 	= cp.generate_quadrature(quad_deg, dist_real, rule = "G")
	integral_std 			= eval_integral(test_function, nodes_std, weights_std)

	# convoluted approach
	samples 						= random.normal(0, 1, size=100000)
	kernel 							= gaussian_kde(samples)
	Dist 							= cp.construct(pdf=pdf, cdf=cdf, bnd=bnd)
	dist_approx 					= Dist()
	nodes_approx, weights_approx 	= cp.generate_quadrature(2*quad_deg, dist_approx, rule="G")
	integral_approx 				= eval_integral(test_function, nodes_approx, weights_approx)

	print(integral_std)
	print(integral_approx)
Example #22
    def __init__(self,
                 vary=None,
                 count=0,
                 polynomial_order=4,
                 regression=False,
                 rule="G",
                 sparse=False,
                 growth=False):
        """
        Create the sampler for the Polynomial Chaos Expansion using
        pseudo-spectral projection or regression (Point Collocation).

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.

        count : int, optional
            Specified counter for Fast forward, default is 0.

        polynomial_order : int, optional
            The polynomial order, default is 4.

        regression : bool, optional
            If True, the regression variant (point collocation) will be used,
            otherwise the projection variant (pseudo-spectral) will be used.
            Default value is False.

        rule : char, optional
            The quadrature method, in case of projection (default is Gaussian "G");
            the sequence sampler, in case of regression (default is Hammersley "M").

        sparse : bool, optional
            If True, use Smolyak sparse grid instead of normal tensor product
            grid. Default value is False.

        growth : bool or None, optional
            If True, quadrature points become nested.
        """

        if vary is None:
            msg = ("'vary' cannot be None. RandomSampler must be passed a "
                   "dict of the names of the parameters you want to vary, "
                   "and their corresponding distributions.")
            logging.error(msg)
            raise Exception(msg)
        if not isinstance(vary, dict):
            msg = ("'vary' must be a dictionary of the names of the "
                   "parameters you want to vary, and their corresponding "
                   "distributions.")
            logging.error(msg)
            raise Exception(msg)
        if len(vary) == 0:
            msg = "'vary' cannot be empty."
            logging.error(msg)
            raise Exception(msg)

        self.vary = Vary(vary)
        self.polynomial_order = polynomial_order

        # List of the probability distributions of uncertain parameters
        params_distribution = list(vary.values())

        # Multivariate distribution
        self.distribution = cp.J(*params_distribution)

        # The orthogonal polynomials corresponding to the joint distribution
        self.P = cp.expansion.stieltjes(polynomial_order,
                                        self.distribution,
                                        normed=True)

        # The quadrature information
        self.quad_sparse = sparse
        self.rule = rule

        # Clenshaw-Curtis should be nested if sparse (#139 chaospy issue)
        self.quad_growth = growth
        cc = ['c', 'C', 'clenshaw_curtis', 'Clenshaw_Curtis']
        if sparse and rule in cc:
            self.quad_growth = True

        # Determine which PCE variant to use
        self.regression = regression

        # Regression variant (point collocation method)
        if regression:
            # Change the default rule
            if rule == "G":
                self.rule = "M"

            # Generates samples
            self._n_samples = 2 * len(self.P)
            self._nodes = cp.generate_samples(order=self._n_samples,
                                              domain=self.distribution,
                                              rule=self.rule)
            self._weights = None

        # Projection variante (Pseudo-spectral method)
        else:
            # Nodes and weights for the integration
            self._nodes, self._weights = cp.generate_quadrature(
                order=polynomial_order,
                dist=self.distribution,
                rule=self.rule,
                sparse=sparse,
                growth=self.quad_growth)
            # Number of samples
            self._n_samples = len(self._nodes[0])

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._n_samples:
            msg = (
                f"Attempt to start sampler fastforwarded to count {self.count}, "
                f"but sampler only has {self.n_samples} samples, therefore"
                f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
Example #23
def test_gen_quad(dist):
    N, w = cp.generate_quadrature(5, dist, rule="G")
    return N, w
Example #24
def getPointsRawAmaliaDistribution(dist, method_dict, n):

    method = method_dict['method']
    bnd = dist.range()
    a = bnd[0]  # left boundary
    b = bnd[1]  # right boundary
    a = a[0]  # get rid of the list
    b = b[0]  # get rid of the list

    C = 225  # Location of max probability or desired starting location.
    R = b-a  # range 360

    # Modify with offset, manually choose the offset you want
    N = method_dict['Noffset']  # N = 10
    i = method_dict['offset']  # i = [0, 1, 2, N-1]

    if method == 'rect':
        # the offset fits N points in the given dx interval
        dx = R/n
        offset = i*dx/N  # make sure this is float
        bounds = [a+offset, R+offset]
        x = np.linspace(bounds[0], bounds[1], n+1)
        x = x[:-1]+dx/2  # Take the midpoints of the bins
        # Modify x, to start from the max probability location
        x = (x+C) % R
        # Get the weights associated with the points locations
        w = getWeights(x, dx, dist)

    if method == 'dakota':

        # Modify the starting point C with offset
        offset = i*R/N  # the offset modifies the starting point for N locations within the whole interval
        C = (C + offset) % R
        # Use the y to set the abscissas, and the pdf to set the ordinates
        y = np.linspace(a, R, 51)  # play with the number here
        dy = y[1]-y[0]
        mid = y[:-1]+dy/2

        # Modify the mid to start from the max probability location
        ynew = (mid+C) % R

        f = dist.pdf(ynew)

        # Modify y to -1 to 1 range, I think makes dakota generation of polynomials easier
        x = 2*(y-a) / R - 1

        updateDakotaFile(method_dict, n, x, f)
        # run Dakota file to get the points locations
        x, w = getSamplePoints(method_dict['dakota_filename'])
        assert len(x) == 1, 'Should only be returning the directions'
        x = np.array(x[0])
        # Rescale x
        x = R*x/2. + R/2. + a

        # Call modify x with the new x.
        x = (x+C) % R

    if method == 'chaospy':
        # I need to adjust the starting position and all of that.
        x, w = cp.generate_quadrature(n-1, dist, rule='G')
        x = x[0]

    return x, w
Example #25
    def __init__(self,
                 vary=None,
                 polynomial_order=4,
                 quadrature_rule="G",
                 count=0,
                 growth=False,
                 sparse=False):
        """
        Create the sampler for the Stochastic Collocation method.

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.
        polynomial_order : int, optional
            The polynomial order, default is 4.

        quadrature_rule : char, optional
            The quadrature method, default is Gaussian "G".

        growth: bool, optional
             Sets the growth rule to exponential for Clenshaw Curtis quadrature,
             which makes it nested, and therefore more efficient for sparse grids.
             Default is False.

        sparse : bool, optional
            If True use sparse grid instead of normal tensor product grid,
            default is False.
        """

        self.vary = Vary(vary)
        self.quadrature_rule = quadrature_rule

        # List of the probability distributions of uncertain parameters
        params_distribution = list(self.vary.get_values())
        # N = number of uncertain parameters
        N = len(params_distribution)

        logging.debug("param dist {}".format(params_distribution))

        # Multivariate distribution
        self.joint_dist = cp.J(*params_distribution)

        # The quadrature information: order, rule and sparsity
        if isinstance(polynomial_order, int):
            print('Received integer polynomial order, assuming isotropic grid')
            self.polynomial_order = [polynomial_order for i in range(N)]
        else:
            self.polynomial_order = polynomial_order

        self.quad_rule = quadrature_rule
        self.sparse = sparse
        self.quad_sparse = sparse
        self.growth = growth
        self.params_distribution = params_distribution

        # L = level of (sparse) grid
        L = np.max(self.polynomial_order)

        # for every dimension (parameter), create a hierarchy of 1D
        # quadrature rules of increasing order
        self.xi_1d = [{} for n in range(N)]
        self.wi_1d = [{} for n in range(N)]

        #for n in range(N):
        #    self.xi_1d[n] = {}
        #    self.wi_1d[n] = {}

        if sparse:
            for n in range(N):
                for i in range(1, L + 1):
                    xi_i, wi_i = cp.generate_quadrature(i + 1,
                                                        params_distribution[n],
                                                        rule=self.quad_rule,
                                                        growth=self.growth)

                    self.xi_1d[n][i] = xi_i[0]
                    self.wi_1d[n][i] = wi_i
        else:
            for n in range(N):
                xi_i, wi_i = cp.generate_quadrature(self.polynomial_order[n],
                                                    params_distribution[n],
                                                    rule=self.quad_rule,
                                                    growth=self.growth)

                self.xi_1d[n][self.polynomial_order[n]] = xi_i[0]
                self.wi_1d[n][self.polynomial_order[n]] = wi_i

        if not sparse:
            # Generate collocation grid via chaospy
            # NOTE: different poly orders per dimension do not work for all
            #      quadrature rules - use self.generate_grid subroutine instead
            # # the nodes of the collocation grid
            # xi_d, _ = cp.generate_quadrature(self.polynomial_order,
            #                                  self.joint_dist,
            #                                  rule=quadrature_rule)
            # self.xi_d = xi_d.T

            # generate collocation grid locally
            l_norm = np.array([self.polynomial_order])
            self.xi_d = self.generate_grid(L, N, l_norm)

        # sparse grid = a linear combination of tensor products of 1D rules
        # of different order. Use chaospy to compute these 1D quadrature rules
        else:

            # L >= N must hold
            if L < N:
                raise RuntimeError((
                    "Sparse grid level is lower than the number of params. "
                    "Increase level (via polynomial_order) p such that p-1 >= N"
                ))

            # multi-index l, such that |l| <= L
            l_norm_le_L = self.compute_sparse_multi_idx(L, N)

            # create sparse grid of dimension N and level q using the 1d
            #rules in self.xi_1d
            self.xi_d = self.generate_grid(L, N, l_norm_le_L)

        self.L = L
        self.N = N
        self._number_of_samples = self.xi_d.shape[0]

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._number_of_samples:
            msg = (
                f"Attempt to start sampler fastforwarded to count {self.count}, "
                f"but sampler only has {self._number_of_samples} samples, therefore"
                f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
Example #26
def discretized_stieltjes(
    order,
    dist,
    rule=None,
    tolerance=1e-16,
    scaling=3,
    n_max=5000,
):
    """
    Discretized Stieltjes' method.

    Examples:
        >>> dist = chaospy.J(chaospy.Uniform(0, 1), chaospy.Beta(3, 4))
        >>> (alpha, beta), orth, norms = chaospy.discretized_stieltjes(2, dist)
        >>> alpha.round(5)
        array([[0.5    , 0.5    , 0.5    ],
               [0.42857, 0.46032, 0.47475]])
        >>> beta.round(5)
        array([[1.     , 0.08333, 0.06667],
               [1.     , 0.03061, 0.04321]])
        >>> orth[:, 2].round(5)
        polynomial([q0**2-q0+0.16667, q1**2-0.88889*q1+0.16667])
        >>> norms.round(5)
        array([[1.     , 0.08333, 0.00556],
               [1.     , 0.03061, 0.00132]])

    """
    if len(dist) > 1:
        assert not dist.stochastic_dependent
        coeffs, orths, norms = zip(*[
            discretized_stieltjes(
                order, dist_, rule=rule, tolerance=tolerance, scaling=scaling)
            for dist_ in dist
        ])
        coeffs = numpy.dstack(coeffs).reshape(2, len(dist), order + 1)
        variables = list(numpoly.variable(len(dist)))
        orths = [orths[idx](q0=variables[idx]) for idx in range(len(dist))]
        orths = numpoly.polynomial(orths).reshape(len(dist), order + 1)
        norms = numpy.asfarray(norms).reshape(len(dist), order + 1)
        return coeffs, orths, norms

    if rule is None:
        rule = "discrete" if dist.interpret_as_integer else "clenshaw_curtis"
    order_ = (2 * order - 1.) / scaling
    beta = beta_old = numpy.nan
    var = numpoly.variable()
    orths = [numpoly.polynomial(0.), numpoly.polynomial(1.)] + [None] * order
    norms = numpy.ones(order + 2)
    coeffs = numpy.ones((2, order + 1))

    while not numpy.all(numpy.abs(coeffs[1] - beta_old) < tolerance):

        beta_old = coeffs[1].copy()
        order_ = max(order_ * scaling, order_ + 1)
        if order_ > n_max:
            break

        [abscissas], weights = chaospy.generate_quadrature(int(order_),
                                                           dist,
                                                           rule=rule,
                                                           segments=0)
        inner = numpy.sum(abscissas * weights)
        for idx in range(order):
            coeffs[0, idx] = inner / norms[idx + 1]
            coeffs[1, idx] = norms[idx + 1] / norms[idx]
            orths[idx + 2] = ((var - coeffs[0, idx]) * orths[idx + 1] -
                              orths[idx] * coeffs[1, idx])
            norms[idx + 2] = numpy.sum(orths[idx + 2](abscissas)**2 * weights)
            inner = numpy.sum(abscissas * orths[idx + 2](abscissas)**2 *
                              weights)
        coeffs[:, order] = (inner / norms[-1], norms[-1] / norms[-2])

    coeffs = coeffs.reshape(2, 1, order + 1)
    orths = numpoly.polynomial(orths[1:]).reshape(1, order + 1)
    norms = numpy.array(norms[1:]).reshape(1, order + 1)
    return coeffs, orths, norms
Example #27
def test_approx_quadrature():
    dist = cp.Iid(normal(), dim)
    nodes, weights = cp.generate_quadrature(order, dist, rule="C")
Example #28

totalerrorqMC = np.divide(totalerrorqMC, reruns)
totalvarianceqMC = np.divide(totalvarianceqMC, reruns)



errorCP = []
varCP = []

K = []

N = 5
for n in range(0, N + 1):
    P = cp.orth_ttr(n, dist)
    nodes, weights = cp.generate_quadrature(n+1, dist, rule="G")
    K.append(len(nodes[0]))
    i1,i2 = np.mgrid[:len(weights), :Nt]
    solves = u(T[i2],nodes[0][i1],nodes[1][i1])

    U_hat = cp.fit_quadrature(P, nodes, weights, solves)
    errorCP.append(dt*np.sum(np.abs(E_analytical(T) - cp.E(U_hat,dist))))
    varCP.append(dt*np.sum(np.abs(V_analytical(T) - cp.Var(U_hat,dist))))


# pl.rc("figure", figsize=[6,4])

ax, tableau20 = prettyPlot()
pl.plot(-1, 1, "k-", linewidth=2)
pl.plot(-1, 1, "k--", linewidth=2)
pl.plot(-1, 1, color=tableau20[0], linewidth=2)
Example #29
data = get_data(nrun, cdir)
#rescale oxygen flux
data[:, 0, :] = -data[:, 0, :] * 86400.

uq = profit.UQ(yaml='uq.yaml')
distribution = cp.J(*uq.params.values())
sparse = uq.backend.sparse
if sparse:
    order = 2 * 3
else:
    order = 3 + 1

# actually start the postprocessing now:

nodes, weights = cp.generate_quadrature(order,
                                        distribution,
                                        rule='G',
                                        sparse=sparse)
expansion, norms = cp.orth_ttr(3, distribution, retall=True)
approx_denit = cp.fit_quadrature(expansion, nodes, weights,
                                 np.mean(data[:, 1, :], axis=1))
approx_oxy = cp.fit_quadrature(expansion, nodes, weights,
                               np.mean(data[:, 0, :], axis=1))

annual_oxy = cp.fit_quadrature(expansion, nodes, weights, data[:, 0, :])
annual_denit = cp.fit_quadrature(expansion, nodes, weights, data[:, 1, :])

s_denit = cp.descriptives.sensitivity.Sens_m(annual_denit, distribution)
s_oxy = cp.descriptives.sensitivity.Sens_m(annual_oxy, distribution)

df_oxy = cp.Std(annual_oxy, distribution)
df_denit = cp.Std(annual_denit, distribution)
Example #30
import chaospy as cp
import numpy as np

if __name__ == '__main__':
    # define test function
    f = lambda x: np.sum([x**i for i in range(0, 8)])

    # compute the analytical result of int_0^1 f(x)dx
    integral_f = np.sum([1. / (i + 1) for i in range(0, 8)])

    # quadrature orders; note that for a given order n, chaospy generates n + 1 nodes and weights
    N = [0, 1, 2, 3, 4, 5]

    # the computations are performed with respect to the uniform distribution
    distr = cp.Uniform()

    evals = np.zeros(len(N))
    for i, n in enumerate(N):
        # generate nodes and weights
        nodes, weights = cp.generate_quadrature(n, distr, rule='G')

        evals[i] = np.sum([f(n) * w for n, w in zip(nodes[0], weights)])

    print('The analytic result is', integral_f)
    for i, ev in enumerate(evals):
        print('The approximation using', N[i] + 1, 'quadrature points is', ev)
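The convergence seen here follows the classical exactness result: an (n + 1)-point Gauss rule integrates polynomials up to degree 2n + 1 exactly, so the degree-7 integrand is reproduced to machine precision from n = 3 (4 nodes) onward. A small hedged check:

import numpy as np
import chaospy as cp

f = lambda x: np.sum([x**i for i in range(0, 8)], axis=0)
exact = np.sum([1. / (i + 1) for i in range(0, 8)])
nodes, weights = cp.generate_quadrature(3, cp.Uniform(), rule='G')
print(abs(np.sum(f(nodes[0]) * weights) - exact))  # ~machine epsilon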
Example #31
    def __init__(self,
                 vary=None,
                 polynomial_order=4,
                 quadrature_rule="G",
                 count=0,
                 growth=False,
                 sparse=False):
        """
        Create the sampler for the Stochastic Collocation method.

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.
        polynomial_order : int, optional
            The polynomial order, default is 4.

        quadrature_rule : char, optional
            The quadrature method, default is Gaussian "G".

        growth: bool, optional
             Sets the growth rule to exponential for Clenshaw Curtis quadrature,
             which makes it nested, and therefore more efficient for sparse grids.
             Default is False.

        sparse : bool, optional
            If True use sparse grid instead of normal tensor product grid,
            default is False.
        """

        self.vary = Vary(vary)
        self.quadrature_rule = quadrature_rule

        # List of the probability distributions of uncertain parameters
        params_distribution = list(self.vary.get_values())

        print("param dist", params_distribution)

        # Multivariate distribution
        self.joint_dist = cp.J(*params_distribution)

        # The quadrature information: order, rule and sparsity
        self.polynomial_order = polynomial_order
        self.quad_rule = quadrature_rule
        self.sparse = sparse
        self.quad_sparse = sparse
        self.growth = growth
        self.params_distribution = params_distribution

        # L = level of (sparse) grid
        L = self.polynomial_order
        # N = number of uncertain parameters
        N = len(params_distribution)

        # for every dimension (parameter), create a hierarchy of 1D
        # quadrature rules of increasing order
        self.xi_1d = {}
        self.wi_1d = {}

        for n in range(N):
            self.xi_1d[n] = {}
            self.wi_1d[n] = {}

        if sparse:
            for n in range(N):
                for i in range(1, self.polynomial_order + 1):
                    xi_i, wi_i = cp.generate_quadrature(i + 1,
                                                        params_distribution[n],
                                                        rule=self.quad_rule,
                                                        growth=self.growth)

                    self.xi_1d[n][i] = xi_i[0]
                    self.wi_1d[n][i] = wi_i
        else:
            for n in range(N):
                xi_i, wi_i = cp.generate_quadrature(self.polynomial_order,
                                                    params_distribution[n],
                                                    rule=self.quad_rule,
                                                    growth=self.growth)
                self.xi_1d[n][self.polynomial_order] = xi_i[0]
                self.wi_1d[n][self.polynomial_order] = wi_i

        if not sparse:
            # the nodes of the collocation grid
            xi_d, _ = cp.generate_quadrature(self.polynomial_order,
                                             self.joint_dist,
                                             rule=quadrature_rule)
            self.xi_d = xi_d.T
        # sparse grid = a linear combination of tensor products of 1D rules
        # of different order. Use chaospy to compute these 1D quadrature rules
        else:

            # L >= N must hold
            if L < N:
                print(
                    "*************************************************************"
                )
                print(
                    "Level of sparse grid is lower than the dimension N (# params)"
                )
                print(
                    "Increase level (via polynomial_order) p such that p-1 >= N"
                )
                print(
                    "*************************************************************"
                )
                import sys
                sys.exit()

            # multi-index l, such that |l| <= L
            l_norm_le_L = self.compute_sparse_multi_idx(L, N)

            # create sparse grid of dimension N and level q using the 1d
            #rules in self.xi_1d
            self.xi_d = self.generate_grid(L, N, l_norm_le_L)

        self.L = L
        self.N = N
        self._number_of_samples = self.xi_d.shape[0]

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._number_of_samples:
            msg = (
                f"Attempt to start sampler fastforwarded to count {self.count}, "
                f"but sampler only has {self._number_of_samples} samples, therefore"
                f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
Example #32
    for n in range(N - 1):
        dx = x[n+1] - x[n]
        K1 = -dx*u[n]*c(x[n])
        K2 = -dx*u[n] + K1/2*c(x[n]+dx/2)
        u[n+1] = u[n] + K1 + K2
    return u

# Define distributions of input parameters
c0 = cp.Normal(0.5, 0.15)
c1 = cp.Uniform(0.5, 2.5)
c2 = cp.Uniform(0.03, 0.07)
# Joint probability distribution
distribution = cp.J(c0, c1, c2)

# Create 3rd order quadrature scheme
nodes, weights = cp.generate_quadrature(
    order=3, domain=distribution, rule="Gaussian")

u0 = 0.3
# Evaluate model at the nodes
x = np.linspace(0, 1, 101)
samples = [model(x, u0, node[0], node[1], node[2])
           for node in nodes.T]

# Generate 3rd order orthogonal polynomial expansion
polynomials = cp.orth_ttr(order=3, dist=distribution)

# Create model approximation (surrogate solver)
model_approx = cp.fit_quadrature(
               polynomials, nodes, weights, samples)

# Model analysis
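The trailing "# Model analysis" comment marks where statistics would be extracted from the surrogate; a hedged continuation using chaospy's standard descriptives:

# Expected value, standard deviation and first-order Sobol indices
mean = cp.E(model_approx, distribution)
deviation = cp.Std(model_approx, distribution)
sobol_main = cp.Sens_m(model_approx, distribution)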
Example #33
    def __init__(self,
                 vary=None,
                 count=0,
                 polynomial_order=4,
                 quadrature_rule="G",
                 sparse=False,
                 growth=None):
        """
        Create the sampler for the Polynomial Chaos Expansion method using
        pseudo-spectral projection.

        Parameters
        ----------
        vary: dict or None
            keys = parameters to be sampled, values = distributions.

        count : int, optional
            Specified counter for Fast forward, default is 0.

        polynomial_order : int, optional
            The polynomial order, default is 4.

        quadrature_rule : char, optional
            The quadrature method, default is Gaussian "G".

        sparse : bool, optional
            If True, use Smolyak sparse grid instead of normal tensor product
            grid. Default value is False.

        growth : bool or None, optional
            If True, quadrature points become nested for sparse grids.
            If omitted, growth is forced to True whenever ``sparse`` is True.
        """

        if vary is None:
            msg = ("'vary' cannot be None. RandomSampler must be passed a "
                   "dict of the names of the parameters you want to vary, "
                   "and their corresponding distributions.")
            logging.error(msg)
            raise Exception(msg)
        if not isinstance(vary, dict):
            msg = ("'vary' must be a dictionary of the names of the "
                   "parameters you want to vary, and their corresponding "
                   "distributions.")
            logging.error(msg)
            raise Exception(msg)
        if len(vary) == 0:
            msg = "'vary' cannot be empty."
            logging.error(msg)
            raise Exception(msg)

        self.vary = Vary(vary)
        self.polynomial_order = polynomial_order

        # List of the probability distributions of uncertain parameters
        params_distribution = list(vary.values())

        # Multivariate distribution
        self.distribution = cp.J(*params_distribution)

        # The orthogonal polynomials corresponding to the joint distribution
        self.P = cp.orth_ttr(polynomial_order, self.distribution)

        # The quadrature information: order, rule and sparsity
        self.quad_order = polynomial_order + 1
        self.quad_rule = quadrature_rule
        self.quad_sparse = sparse
        if sparse:
            self.quad_growth = True
        else:
            self.quad_growth = growth

        # Nodes and weights for the integration
        self._nodes, _ = cp.generate_quadrature(order=self.quad_order,
                                                dist=self.distribution,
                                                rule=quadrature_rule,
                                                sparse=sparse,
                                                growth=self.quad_growth)

        # Number of samples
        self._number_of_samples = len(self._nodes[0])

        # Fast forward to specified count, if possible
        self.count = 0
        if self.count >= self._number_of_samples:
            msg = (
                f"Attempt to start sampler fastforwarded to count {self.count}, "
                f"but sampler only has {self._number_of_samples} samples, therefore"
                f"this sampler will not provide any more samples.")
            logging.warning(msg)
        else:
            for i in range(count):
                self.__next__()
Example #34
x = numpy.linspace(0, 10, 1000)

# Defining the random input distributions:
a = cp.Uniform(0, 0.1)
I = cp.Uniform(8, 10)
dist = cp.J(a, I)

num_tests = 100
order = 4

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1,
                                        dist,
                                        rule="G",
                                        sparse=False)
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(x, s[0], s[1]) for s in test_inputs.T])
surrogate_test_outputs = numpy.array(
    [U_hat(s[0], s[1]) for s in test_inputs.T])

print "mean l2 error", numpy.mean(
    numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))

i = 10
plt.scatter(test_outputs[:, i], surrogate_test_outputs[:, i])
Example #35
import chaospy
import chaospy as cp
import pandas as pd
import numpy as np

QUAD_ORDER = 18
quad = False


def f(x, y):
    return (1 - x)**2 * 10 * (y - x**2)**2


distribution = chaospy.J(chaospy.Normal(0, 1), chaospy.Normal(0, 1))

if quad:
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    X, W = chaospy.generate_quadrature(QUAD_ORDER, distribution, rule="G")
    evals = [f(x[0], x[1]) for x in X.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, X, W, evals)
else:
    dat = pd.read_csv('./dakota_tabular.dat', sep=r'\s+')
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    samples = np.array([dat.x1, dat.x2])
    evals = dat.response_fn_1
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)

total = chaospy.descriptives.sensitivity.total.Sens_t(foo_approx, distribution)
main = chaospy.descriptives.sensitivity.main.Sens_m(foo_approx, distribution)
Example #36
def get_nodes_weights(dist, quad_deg, rule):
	nodes, weights = cp.generate_quadrature(quad_deg, dist, rule=rule)

	return nodes, weights
Example #37
	def get_quad_points(self, quad_deg, dist):
		nodes, weights = cp.generate_quadrature(quad_deg, dist, rule="G")

		return nodes, weights
Example #38
import numpy as np
import chaospy as cp


def u(x, a, I):
    return I * np.exp(-a * x)


# Dependent target distribution Q and independent standard-normal proxy R
C = [[1, 0.5], [0.5, 1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

# Rosenblatt transformation using point collocation (regression)
P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2 * len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)

# Rosenblatt transformation using the pseudo-spectral method
P = cp.orth_ttr(2, dist_R)
nodes_R, weights_R = cp.generate_quadrature(3, dist_R)
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))
# reweight the quadrature so it integrates against Q instead of R
weights_Q = weights_R * dist_Q.pdf(nodes_Q) / dist_R.pdf(nodes_R)

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_quadrature(P, nodes_R, weights_Q, samples_u)
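# Moments of u_hat are taken against dist_R, since the expansion lives in the
# transformed (independent) coordinates:
mean = cp.E(u_hat, dist_R)
variance = cp.Var(u_hat, dist_R)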
Example #39
0
def run_test(testi, typid, exceed_evals=None, evals_end=None):
    problem_function_wrapped = FunctionCustom(
        lambda x: problem_function(x),
        output_dim=problem_function.output_length())
    op.f = problem_function_wrapped

    measure_start = time.time()
    multiple_evals = None
    typ = types[typid]
    if typ not in ("Gauss", "Fejer", "sparseGauss"):
        do_inverse_transform = typ in ("adaptiveTransBSpline",
                                       "adaptiveTransTrapez",
                                       "adaptiveTransHO")
        if do_inverse_transform:
            a_trans, b_trans = np.zeros(dim), np.ones(dim)

        if typ == "adaptiveHO":
            grid = GlobalHighOrderGridWeighted(a,
                                               b,
                                               op,
                                               boundary=uniform_distr)
        elif typ in ("adaptiveTrapez", "Trapez"):
            grid = GlobalTrapezoidalGridWeighted(a,
                                                 b,
                                                 op,
                                                 boundary=uniform_distr)
        elif typ == "adaptiveLagrange":
            grid = GlobalLagrangeGridWeighted(a, b, op, boundary=uniform_distr)
        elif typ == "adaptiveTransBSpline":
            grid = GlobalBSplineGrid(a_trans, b_trans, boundary=uniform_distr)
        elif typ == "adaptiveTransTrapez":
            grid = GlobalTrapezoidalGrid(a_trans,
                                         b_trans,
                                         boundary=uniform_distr)
        elif typ == "adaptiveTransHO":
            grid = GlobalHighOrderGrid(a_trans,
                                       b_trans,
                                       boundary=uniform_distr,
                                       split_up=False)
        op.set_grid(grid)

        if do_inverse_transform:
            # Use Integration operation
            # ~ f_refinement = op.get_inverse_transform_Function(op.get_PCE_Function(poly_deg_max))
            f_refinement = op.get_inverse_transform_Function(
                op.get_expectation_variance_Function())
            # ~ f_refinement.plot(np.array([0.001]*2), np.array([0.999]*2), filename="trans.pdf")
            op_integration = Integration(f_refinement, grid, dim)
            combiinstance = SpatiallyAdaptiveSingleDimensions2(
                a_trans, b_trans, operation=op_integration, norm=2)
        else:
            combiinstance = SpatiallyAdaptiveSingleDimensions2(a,
                                                               b,
                                                               operation=op,
                                                               norm=2)
            f_refinement = op.get_expectation_variance_Function()

        lmax = 3
        if typ == "Trapez":
            lmax = testi + 2
        if evals_end is not None:
            multiple_evals = dict()
            combiinstance.performSpatiallyAdaptiv(
                1,
                lmax,
                f_refinement,
                error_operator,
                tol=0,
                max_evaluations=evals_end,
                print_output=True,
                solutions_storage=multiple_evals)
        elif exceed_evals is None or typ == "Trapez":
            combiinstance.performSpatiallyAdaptiv(1,
                                                  lmax,
                                                  f_refinement,
                                                  error_operator,
                                                  tol=0,
                                                  max_evaluations=1,
                                                  print_output=verbose)
        else:
            combiinstance.performSpatiallyAdaptiv(
                1,
                lmax,
                f_refinement,
                error_operator,
                tol=np.inf,
                max_evaluations=np.inf,
                min_evaluations=exceed_evals + 1,
                print_output=verbose)

        # ~ combiinstance.plot()
        # Calculate the gPCE using the nodes and weights from the refinement
        # ~ op.calculate_PCE(None, combiinstance)
        if multiple_evals is None:
            E, Var = op.calculate_expectation_and_variance(combiinstance)
    else:
        # ~ polys, polys_norms = cp.orth_ttr(poly_deg_max, op.distributions_joint, retall=True)
        if typ == "Gauss":
            if testi >= 29:
                # Reference solution or negative points
                return np.inf
            nodes, weights = cp.generate_quadrature(testi,
                                                    op.distributions_joint,
                                                    rule="G")
        elif typ == "Fejer":
            nodes, weights = cp.generate_quadrature(testi,
                                                    op.distributions_joint,
                                                    rule="F",
                                                    normalize=True)
        elif typ == "sparseGauss":
            level = testi + 1
            if level > 5:
                # normal distribution has infinite bounds
                return np.inf
            expectations = [distr[1] for distr in distris]
            standard_deviations = [distr[2] for distr in distris]
            hgrid = GaussHermiteGrid(expectations, standard_deviations)
            op.set_grid(hgrid)
            combiinstance = StandardCombi(a, b, operation=op)
            combiinstance.perform_combi(1, level,
                                        op.get_expectation_variance_Function())
            nodes, weights = combiinstance.get_points_and_weights()
            nodes = nodes.T

        # ~ f_evals = [problem_function_wrapped(c) for c in zip(*nodes)]
        # ~ op.gPCE = cp.fit_quadrature(polys, nodes, weights, np.asarray(f_evals), norms=polys_norms)
        E, Var = op.calculate_expectation_and_variance_for_weights(
            nodes, weights)
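        # The helper above presumably reduces to the plain quadrature sums
        # E = sum_i w_i f(x_i) and Var = sum_i w_i f(x_i)^2 - E^2, e.g.:
        # ~ f_evals = np.array([problem_function_wrapped(c) for c in zip(*nodes)])
        # ~ E = np.dot(weights, f_evals)
        # ~ Var = np.dot(weights, f_evals ** 2) - E ** 2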

    print("simulation time: " + str(time.time() - measure_start) + " s")

    # ~ if False:
    # ~ E, var = op.calculate_expectation_and_variance(combiinstance)
    # ~ E_pX = reshape_result_values(E)
    # ~ Var = reshape_result_values(var)

    def reshape_result_values(vals):
        return vals[0]

    tmpdir = os.getenv("XDG_RUNTIME_DIR")
    results_path = tmpdir + "/uqtestSD.npy"
    solutions_data = []
    if os.path.isfile(results_path):
        solutions_data = list(np.load(results_path, allow_pickle=True))

    if multiple_evals is None:
        E = reshape_result_values(E)
        Var = reshape_result_values(Var)

        # ~ err_descs = ("E prey", "P10 prey", "P90 prey", "Var prey")
        err_descs = ("E prey", "Var prey")
        err_data = (
            (E, E_ref),
            # ~ (P10, P10_ref),
            # ~ (P90, P90_ref),
            (Var, Var_ref))
        errors = []
        for i, desc in enumerate(err_descs):
            vals = err_data[i]
            abs_err = error_absolute(*vals)
            rel_err = error_relative(*vals)
            errors.append(abs_err)
            errors.append(rel_err)
            print(
                f"{desc}: {vals[0]}, absolute error: {abs_err}, relative error: {rel_err}"
            )

        num_evals = problem_function_wrapped.get_f_dict_size()
        result_data = (num_evals, timestep_problem, typid, errors)
        assert len(result_data) == 4
        assert len(errors) == 4

        if all([
                any([d[i] != result_data[i] for i in range(3)])
                for d in solutions_data
        ]):
            solutions_data.append(result_data)
            np.save(results_path, solutions_data)

        return num_evals

    solutions = op.calculate_multiple_expectation_and_variance(multiple_evals)
    for num_evals, E, Var in solutions:
        E = reshape_result_values(E)
        Var = reshape_result_values(Var)

        # ~ err_descs = ("E prey", "P10 prey", "P90 prey", "Var prey")
        err_descs = ("E prey", "Var prey")
        err_data = (
            (E, E_ref),
            # ~ (P10, P10_ref),
            # ~ (P90, P90_ref),
            (Var, Var_ref))
        errors = []
        for i, desc in enumerate(err_descs):
            vals = err_data[i]
            abs_err = error_absolute(*vals)
            rel_err = error_relative(*vals)
            errors.append(abs_err)
            errors.append(rel_err)
            print(
                f"{desc}: {vals[0]}, absolute error: {abs_err}, relative error: {rel_err}"
            )

        result_data = (num_evals, timestep_problem, typid, errors)
        assert len(result_data) == 4
        assert len(errors) == 4

        if all([
                any([d[i] != result_data[i] for i in range(3)])
                for d in solutions_data
        ]):
            solutions_data.append(result_data)
    np.save(results_path, solutions_data)

    return problem_function_wrapped.get_f_dict_size()
Example #40
0
    def test_convergence_circuit_model(self, bool_plot: bool = False):
        x_lower = np.array([50, 25, 0.5, 1.2, 0.25,
                            50])  # table 3, constantine-2017
        x_upper = np.array([150, 70, 3.0, 2.5, 1.2,
                            300])  # table 3, constantine-2017

        activity_scores_constantine = np.array([
            2.377860943309341, 1.619026815363377, 0.261741461441246,
            0.075234628507027, 0.000000116801952, 0.000065807942335
        ])  # calculated with constantine's matlab scripts (constantine-2017)
        first_eigenvector_constantine = np.array([
            0.740716965496176, -0.611203856808294, -0.245751018607724,
            0.131755257933889, 0.000164166339828, 0.003896711210227
        ])
        eigenvalues_C_hat_constantine = np.array([
            4.333929773365277, 0.172154546775767, 0.043837280605887,
            0.008740767183207, 0.000130619772892, 0.000000006588527
        ])

        relative_error_alpha_constantine = \
            np.array([[1.0336576094e-01, 7.4484410837e-02, 3.2199103665e-02, 2.3745185339e-02, 7.3669706642e-03,
                       7.6656239406e-03, 3.1840067208e-03],
                      [3.7870122253e-02, 2.9914292211e-02, 1.5144943581e-02, 8.7714103148e-03, 4.4700495080e-03,
                       2.3181577249e-03, 1.1608862367e-03],
                      [5.5327059088e-03, 3.8373262294e-03, 2.1444430111e-03, 1.2336853412e-03, 6.8385574590e-04,
                       4.6215573641e-04, 1.6012529246e-04],
                      [4.7273235250e-03, 3.6211927084e-03, 1.7887246903e-03, 1.0458653419e-03, 6.1191072469e-04,
                       4.0120287646e-04, 1.2149120835e-04],
                      [9.2850207014e-09, 5.7034221317e-09, 3.2097814749e-09, 1.9626299343e-09, 7.6701151806e-10,
                       4.7641920831e-10, 2.2068876806e-10],
                      [8.8701371027e-06, 5.8444088089e-06, 2.9941349453e-06, 1.8812300039e-06, 8.3129987185e-07,
                       5.5281073089e-07, 2.2441932087e-07]])

        relative_error_w_constantine = \
            np.array([[2.7714612059e-02, 1.5914810891e-02, 7.2787857034e-03, 5.6092823993e-03, 2.0922412569e-03,
                       1.5990443476e-03, 5.9951083677e-04],
                      [2.6597648919e-02, 1.8193945825e-02, 8.0722129599e-03, 5.7725628927e-03, 2.3120501167e-03,
                       1.7137227490e-03, 6.8505176689e-04],
                      [1.6522393310e-02, 8.9553701043e-03, 4.2085081539e-03, 3.1165970129e-03, 1.1959665435e-03,
                       7.5160538294e-04, 3.5689233942e-04],
                      [1.5741593060e-02, 1.0683193332e-02, 4.3969836718e-03, 3.4894171678e-03, 1.4456638503e-03,
                       8.6030138247e-04, 3.7841702257e-04],
                      [2.1623799573e-05, 1.1406089340e-05, 8.2572277276e-06, 4.7396837444e-06, 1.9973336116e-06,
                       1.3417606303e-06, 5.2939720012e-07],
                      [8.0536892552e-04, 4.8504474422e-04, 3.4402170980e-04, 1.7722519510e-04, 8.3146852712e-05,
                       5.4381142033e-05, 2.1796009955e-05]])

        circuit_model = CircuitModel()
        dim = circuit_model.get_dimension()
        density_type = "uniform"

        test_input = x_lower
        k = 1 + 1
        bool_gradient = True
        M_boot = 0
        step_size = None
        case = None
        seed = 2456354
        bool_averaged = False
        no_runs_averaged = 1
        bool_save_data = False
        bool_print = False
        path2results = None
        key = ["$R_b1$", "$R_b2$", "$R_f$", "$R_c1$", "$R_c2$", "$beta$"]

        alpha_vec = np.array([5e1, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4
                              ]) / np.log(dim) / k

        # Gauss-Legendre quadrature approximation of C
        # abscissas1, weights1 = np.polynomial.legendre.leggauss(np.power(7,6)) -> MemoryError

        distribution = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1),
                            cp.Uniform(-1, 1), cp.Uniform(-1, 1),
                            cp.Uniform(-1, 1), cp.Uniform(-1, 1))
        abscissas, weights = cp.generate_quadrature(dim,
                                                    distribution,
                                                    rule="gauss_legendre")
        abscissas_scaled = transform_coordinates_from_unit(
            x_lower, x_upper, abscissas)

        assert (np.max(abscissas_scaled, axis=1) <= x_upper).all()
        assert (np.min(abscissas_scaled, axis=1) >= x_lower).all()

        gradient_evals = circuit_model.eval_gradient(abscissas_scaled)

        # transformation of weights for interval [a,b] instead of [-1,1] -> factor (b-a)/2
        gradient_evals_scaled = np.matmul(0.5 * np.diag(x_upper - x_lower),
                                          gradient_evals)

        gradient_evals_scaled_weighted = np.multiply(gradient_evals_scaled,
                                                     np.sqrt(weights))
        C_tmp = np.matmul(gradient_evals_scaled_weighted,
                          np.transpose(gradient_evals_scaled_weighted))

        n_samples = len(weights)
        tmp_sum = np.zeros(shape=(dim, dim))
        for i in range(0, n_samples):
            tmp = np.expand_dims(gradient_evals_scaled[:, i],
                                 axis=1) * gradient_evals_scaled[:, i]
            tmp_sum = tmp_sum + weights[i] * tmp
        C_quadrature_approximation = tmp_sum

        nptest.assert_array_almost_equal(C_quadrature_approximation, C_tmp)
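        # Both constructions above approximate the same object: the active
        # subspace matrix C = E[grad f grad f^T], here via a tensorized
        # Gauss-Legendre rule, i.e. C ~= sum_i w_i grad f(x_i) grad f(x_i)^T.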

        # calc eigenvalues
        rho = UniformGenMult(x_lower, x_upper, dim)
        activity_scores_quad, __, w_active_quad, __, __, __, __, __, __, lambda_quad, __, __ = calc_activity_scores_from_C(
            C_quadrature_approximation,
            circuit_model,
            rho,
            True,
            dim,
            force_idx_gap=0)

        first_eigenvector_quad = w_active_quad[:, 0]

        nptest.assert_array_almost_equal(lambda_quad,
                                         eigenvalues_C_hat_constantine)
        nptest.assert_array_almost_equal(activity_scores_quad,
                                         activity_scores_constantine)
        assert_allclose_eigenvectors(first_eigenvector_quad,
                                     first_eigenvector_constantine)

        ######################################################## Calc activity scores with Algorithm 1.1
        idx = 0
        activity_scores = np.zeros(shape=(dim, len(alpha_vec)))
        activity_score_error_av = np.zeros(shape=(dim, len(alpha_vec)))

        first_eigenvector = np.zeros(shape=(dim, len(alpha_vec)))
        first_eigenvector_error_av = np.zeros(shape=(dim, len(alpha_vec)))
        n_samples = np.zeros(len(alpha_vec))
        n_trials = 10

        for alpha in alpha_vec:
            activity_score_av = np.zeros(shape=dim)
            first_eigenvector_entry_av = np.zeros(shape=dim)
            tmp_activity_score_error_av = np.zeros(shape=dim)
            tmp_first_eigenvector_error_av = np.zeros(shape=dim)
            for trial in range(
                    0, n_trials
            ):  # in constantine-2017 scores are averaged over 10 trials
                seed = int(np.random.rand(1) * (2**32 - 1))
                # Active Subspaces
                max_rel_error_eig, error_c_gradients, tmp_activity_score, \
                __, size_subspace, path_to_files, tmp_n_samples, lambda_eig, \
                w_active, test_y, __, idx_gap, __, distance_subspace, __ = \
                    active_subspace_with_gradients(
                        circuit_model, density_type, x_lower, x_upper, test_input, alpha, k, bool_gradient, M_boot,
                        step_size, step_size, case, seed, bool_averaged, no_runs_averaged, bool_save_data, bool_print,
                        bool_plot, path2results, force_idx_gap=0)

                # normalize
                nptest.assert_array_almost_equal(
                    np.linalg.norm(w_active[:, 0]), 1)
                tmp_first_eigenvector = w_active[:, 0] * np.sign(w_active[0, 0])

                first_eigenvector_entry_av = first_eigenvector_entry_av + tmp_first_eigenvector
                activity_score_av = activity_score_av + tmp_activity_score
                tmp_activity_score_error = relative_error_constantine_2017(
                    tmp_activity_score, activity_scores_quad)
                tmp_activity_score_error_av = tmp_activity_score_error_av + tmp_activity_score_error

                tmp_first_eigenvector_error = relative_error_constantine_2017(
                    tmp_first_eigenvector * np.sign(tmp_first_eigenvector[0]),
                    first_eigenvector_quad *
                    np.sign(first_eigenvector_quad[0]))
                tmp_first_eigenvector_error_av = tmp_first_eigenvector_error_av + tmp_first_eigenvector_error

            n_samples[idx] = tmp_n_samples
            activity_scores[:, idx] = activity_score_av / n_trials
            activity_score_error_av[:, idx] = tmp_activity_score_error_av / n_trials
            first_eigenvector[:, idx] = first_eigenvector_entry_av / n_trials
            first_eigenvector_error_av[:, idx] = tmp_first_eigenvector_error_av / n_trials

            idx = idx + 1

        # compare this implementation with results of constantine from constantine-2017 (Matlab code)
        nptest.assert_allclose(activity_score_error_av,
                               relative_error_alpha_constantine,
                               rtol=1)
        nptest.assert_allclose(first_eigenvector_error_av,
                               relative_error_w_constantine,
                               rtol=1)

        # check convergence rate
        for i in range(0, dim):
            p = np.polyfit(np.log10(n_samples),
                           np.log10(activity_score_error_av[i, :]), 1)
            # Convergence rate of M^(-1/2) according to constantine-2017
            nptest.assert_almost_equal(p[0], -0.5, decimal=1)

        if bool_plot:
            col = np.array([[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980],
                            [0.9290, 0.6940, 0.1250], [0.4940, 0.1840, 0.5560],
                            [0.4660, 0.6740, 0.1880], [0.3010, 0.7450,
                                                       0.9330]])

            # Plot relative error in activity scores
            plt.figure()
            for i in range(0, dim):
                # plt.loglog(n_samples, relative_error_alpha[i, :], 'o-', label=key[i], color=col[i, :])
                plt.loglog(n_samples,
                           activity_score_error_av[i, :],
                           'o-',
                           label=key[i],
                           color=col[i, :])
                plt.loglog(n_samples,
                           relative_error_alpha_constantine[i, :],
                           'x:',
                           color=col[i, :])
            plt.ylabel("Relative error (activity score)")
            plt.xlabel("Number of MC samples")
            plt.legend()
            plt.ylim([1e-9, 1e0])
            plt.xlim([5e1, 5e4])
            plt.grid()

            # Plot relative error in first eigenvector scores
            plt.figure()
            for i in range(0, dim):
                plt.loglog(n_samples,
                           first_eigenvector_error_av[i, :],
                           'o-',
                           label=key[i],
                           color=col[i, :])
                plt.loglog(n_samples,
                           relative_error_w_constantine[i, :],
                           'x:',
                           color=col[i, :])

            plt.ylabel("Relative error (first eigenvector)")
            plt.xlabel("Number of MC samples")
            plt.legend()
            plt.ylim([1e-9, 1e0])
            plt.xlim([5e1, 5e4])
            plt.grid()
            plt.show()
Example #41
0
import numpy
import chaospy as cp
import matplotlib.pyplot as plt
# campaspe_toy (the model under study) and parmap (a parallel map helper)
# are project-specific imports from the original repository


def u(z):
    return campaspe_toy.run(*z, nrow=10)


# Defining the random input distributions:
dists = [cp.Uniform(0, 0.5) for i in range(3)]
dists.append(cp.Uniform(-200.0, 50.0))
dist = cp.J(*dists)

num_tests = 100
order = 3

## Polynomial chaos expansion
## using Pseudo-spectral method and Gaussian Quadrature
P, norms = cp.orth_ttr(order - 2, dist, retall=True)
nodes, weights = cp.generate_quadrature(order + 1, dist, rule="G", sparse=False)
# solves = [u(s) for s in nodes.T]
solves = parmap(u, nodes.T)  # [u(s) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

test_inputs = dist.sample(num_tests)
test_outputs = numpy.array([u(s) for s in test_inputs.T])
surrogate_test_outputs = numpy.array([U_hat(*s) for s in test_inputs.T])

print "mean l2 error", numpy.mean(numpy.linalg.norm(test_outputs - surrogate_test_outputs, axis=0))
print "mean l2 norm", numpy.mean(numpy.linalg.norm(test_outputs, axis=0))

# scatter for all QOI
num_qoi = test_outputs.shape[1]
for qoi_i in range(num_qoi):
    plt.subplot(int(numpy.ceil(num_qoi / 3.0)), 3, qoi_i + 1)
Example #42
0
def getPointsModifiedAmaliaDistribution(dist, method_dict, n):

    # Modify the input range to start at max probability location
    # and account for zero probability regions.

    # f(x)
    #   |                   *
    #   |   ***            * *      **
    #   | **   *          *   **  **  *     ***
    #   |*      *        *      **     *  **   *
    #   |        *      *               **
    #   |         *    *
    # --+----------****-----+------------------+--
    #  lo          A  B     C                  hi    (x)

    method = method_dict['method']
    bnd = dist.range()
    a = bnd[0]  # left boundary
    b = bnd[1]  # right boundary
    a = a[0]  # get rid of the list
    b = b[0]  # get rid of the list
    # Make sure the A, B, C values are the same as those in the distribution
    A, B = dist.get_zero_probability_region()
    # A = 110  # Left boundary of zero probability region
    # B = 140  # Right boundary of zero probability region

    C = 225  # Location of max probability or desired starting location.  Don't put this between A and B.
    r = b-a  # original range
    R = r - (B-A) # modified range

    # Modify with offset, manually choose the offset you want
    N = method_dict['Noffset']  # N = 10
    i = method_dict['offset']  # i = [0, 1, 2, N-1]

    if method == 'rect':
        # the offset fits N points in the given dx interval
        dx = R/n
        offset = i*dx/N  # make sure this is float
        bounds = [a+offset, R+offset]
        x = np.linspace(bounds[0], bounds[1], n+1)
        x = x[:-1]+dx/2  # Take the midpoints of the bins
        # Modify x, to start from the max probability location
        x = modifyx(x, A, B, C, r)
        # Get the weights associated with the points locations
        w = getWeights(x, dx, dist)

    if method == 'dakota':

        # Modify the starting point C with offset
        offset = i*r/N  # the offset modifies the starting point for N locations within the whole interval
        C = (C + offset) % r
        x, f = generate_direction_abscissas_ordinates(a, A, B, C, r, R, dist)
        updateDakotaFile(method_dict, n, x, f)
        # run Dakota file to get the points locations
        x, w = getSamplePoints(method_dict['dakota_filename'])
        assert len(x) == 1, 'Should only be returning the directions'
        x = np.array(x[0])
        # Rescale x
        x = R*x/2. + R/2. + a
        # x = (330/2. + 330/2.*x  # Should be in terms of the variables
        # Call modify x with the new x.
        x = modifyx(x, A, B, C, r)

    if method == 'chaospy':
        # I need to adjust the starting position and all of that.
        x, w = cp.generate_quadrature(n-1, dist, rule='G')
        x = x[0]

    return x, w
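# Usage sketch (hypothetical values; the distribution must expose
# get_zero_probability_region(), as assumed above):
# method_dict = {'method': 'chaospy', 'Noffset': 10, 'offset': 0}
# points, weights = getPointsModifiedAmaliaDistribution(dist, method_dict, n=20)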