Example #1
import numpy as np


class NatafTransformation(object):

    def __init__(self, data, sample_type=None, dist=None):
        # assumption: GaussianKDE and createOperationDensityMargTo1DKDE come from the compiled pysgpp module
        from pysgpp import createOperationDensityMargTo1DKDE, GaussianKDE
        from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta, Normal, GaussianKDEDist

        # fix stochastic setting
        self.alpha, self.beta = 5., 10.
        self.lwr, self.upr = 0., 1.
        self.normal = Normal(0, 1, -2, 2)
        self.uniform = Uniform(self.lwr, self.upr)
        self.b = Beta(self.alpha, self.beta, self.lwr, self.upr)
        self.dim = data.shape[0]

        if sample_type == 'cbeta':
            # marginalize the density
            opMar = createOperationDensityMargTo1DKDE(dist.dist)
            kdex = GaussianKDE()
            opMar.margToDimX(kdex, 0)
            kdey = GaussianKDE()
            opMar.margToDimX(kdey, 1)

            # set the mean vector and the correlation matrix
            self.x = [GaussianKDEDist(kdex.getSamples().array()),
                      GaussianKDEDist(kdey.getSamples().array())]
            self.M = np.array([[kdex.mean(), kdey.mean()]]).T
            self.S = dist.corrcoeff()
        else:
            self.x = [self.b, self.b]
            self.M = np.array([[self.b.mean(), self.b.mean()]]).T
            self.S = np.array([[1., 0.],
                               [0., 1.]])

        # compute the correlation matrix from the covariance matrix
        # this is used to transform the results back to the original space
        self.D = np.diag(np.sqrt(np.diag(self.S)))
        # the inverse of D rescales each component by the reciprocal of its standard deviation
        self.D_inverse = np.diag(1. / np.sqrt(np.diag(self.S)))
        self.C = self.D_inverse.dot(self.S.dot(self.D_inverse))

#         fig = plt.figure()
#         plotDensity1d(self.x[0])
#         plotDensity1d(self.b)
#         fig.show()
#
#         fig = plt.figure()
#         plotDensity1d(self.x[1])
#         plotDensity1d(self.b)
#         fig.show()

        # compute the Cholesky decomposition of the correlation matrix
        self.L = np.linalg.cholesky(self.C)

        # adjust it according to [Lu ...]
        # nothing needs to be done for uniform <--> uniform
        self.L_inverse = np.linalg.inv(self.L)

        assert abs(np.sum(self.C - self.L.dot(self.L.T))) < 1e-14
        assert abs(np.sum(self.S - self.D.dot(self.L.dot(self.L.T.dot(self.D))))) < 1e-14
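
The constructor's matrix algebra is plain NumPy: D holds the marginal standard deviations, C = D^-1 S D^-1 is the correlation matrix, and the final asserts check that the Cholesky factor reproduces both C and S. A minimal standalone sketch of the same identities (the 2x2 covariance values are illustrative, not taken from the source):

import numpy as np

# illustrative covariance matrix, chosen for the sketch
S = np.array([[4.0, 1.2],
              [1.2, 1.0]])

D = np.diag(np.sqrt(np.diag(S)))            # marginal standard deviations
D_inv = np.diag(1.0 / np.sqrt(np.diag(S)))  # reciprocal standard deviations
C = D_inv.dot(S).dot(D_inv)                 # correlation matrix, unit diagonal

L = np.linalg.cholesky(C)                   # C = L L^T

# the same identities the constructor asserts
assert np.allclose(C, L.dot(L.T))
assert np.allclose(S, D.dot(L).dot(L.T).dot(D))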
Example #2
import numpy as np
# assumption: TLognormal lives alongside the other uq dists
from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta, TLognormal


def generateDistribution(dist_type, xlim=None, alpha=None):
    if dist_type == "uniform":
        return Uniform(xlim[0], xlim[1])
    elif dist_type == "beta":
        return Beta(5, 4, xlim[0], xlim[1] - xlim[0])
    elif dist_type == "lognormal":
        return TLognormal.by_alpha(1e-12, np.exp(-1), alpha=alpha)
    else:
        raise ValueError("dist type '%s' unknown" % dist_type)
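
A usage sketch for generateDistribution, assuming the pysgpp uq dists are importable as above; the xlim values are illustrative:

u = generateDistribution("uniform", xlim=[0.0, 1.0])
b = generateDistribution("beta", xlim=[0.0, 1.0])
print(u.mean(), b.mean())  # the uq Dist objects expose mean(), var(), pdf(), ...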
Example #3
def example8(dist_type="uniform"):
    # note: d, func, g, and J are defined at module level in the original example script
    operation = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(
        d, func)

    config = pysgpp.OrthogonalPolynomialBasis1DConfiguration()

    if dist_type == "beta":
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_JACOBI
        config.polyParameters.alpha_ = 5
        config.polyParameters.beta_ = 4

        U = J(
            [Beta(config.polyParameters.alpha_, config.polyParameters.beta_)] *
            d)
    else:
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_LEGENDRE
        U = J([Uniform(0, 1)] * d)

    basisFunction = pysgpp.OrthogonalPolynomialBasis1D(config)
    basisFunctions = pysgpp.OrthogonalPolynomialBasis1DVector(d, basisFunction)

    q = 3
    operation.getLevelManager().addRegularLevels(q)
    print("Total function evaluations: %i" % operation.numGridPoints())
    ## compute mean and variance of the interpolant

    surrogateConfig = pysgpp.CombigridSurrogateModelConfiguration()
    surrogateConfig.type = pysgpp.CombigridSurrogateModelsType_POLYNOMIAL_CHAOS_EXPANSION
    surrogateConfig.loadFromCombigridOperation(operation)
    surrogateConfig.basisFunction = basisFunction
    pce = pysgpp.createCombigridSurrogateModel(surrogateConfig)

    n = 10000
    values = [g(pysgpp.DataVector(xi)) for xi in U.rvs(n)]
    print("E(u)   = %g ~ %g" % (np.mean(values), pce.mean()))
    print("Var(u) = %g ~ %g" % (np.var(values), pce.variance()))
Example #4
import numpy as np


class NatafTransformation(object):
    def __init__(self, data, sample_type=None, dist=None):
        # assumption: KernelDensityEstimator and createOperationDensityMargTo1DKDE come from the compiled pysgpp module
        from pysgpp import createOperationDensityMargTo1DKDE, KernelDensityEstimator
        from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta, Normal, KDEDist

        # fix stochastic setting
        self.alpha, self.beta = 5., 10.
        self.lwr, self.upr = 0., 1.
        self.normal = Normal(0, 1, -2, 2)
        self.uniform = Uniform(self.lwr, self.upr)
        self.b = Beta(self.alpha, self.beta, self.lwr, self.upr)
        self.dim = data.shape[0]

        if sample_type == 'cbeta':
            # marginalize the density
            opMar = createOperationDensityMargTo1DKDE(dist.dist)
            kdex = KernelDensityEstimator()
            opMar.margToDimX(kdex, 0)
            kdey = KernelDensityEstimator()
            opMar.margToDimX(kdey, 1)

            # set the mean vector and the correlation matrix
            self.x = [
                KDEDist(kdex.getSamples().array()),
                KDEDist(kdey.getSamples().array())
            ]
            self.M = np.array([[kdex.mean(), kdey.mean()]]).T
            self.S = dist.corrcoeff()
        else:
            self.x = [self.b, self.b]
            self.M = np.array([[self.b.mean(), self.b.mean()]]).T
            self.S = np.array([[1., 0.], [0., 1.]])

        # compute the correlation matrix from the covariance matrix
        # this is used to transform the results back to the original space
        self.D = np.diag(np.sqrt(np.diag(self.S)))
        # the inverse of D rescales each component by the reciprocal of its standard deviation
        self.D_inverse = np.diag(1. / np.sqrt(np.diag(self.S)))
        self.C = self.D_inverse.dot(self.S.dot(self.D_inverse))

        #         fig = plt.figure()
        #         plotDensity1d(self.x[0])
        #         plotDensity1d(self.b)
        #         fig.show()
        #
        #         fig = plt.figure()
        #         plotDensity1d(self.x[1])
        #         plotDensity1d(self.b)
        #         fig.show()

        # compute the Cholesky decomposition of the correlation matrix
        self.L = np.linalg.cholesky(self.C)

        # adjust it according to [Lu ...]
        # nothing needs to be done for uniform <--> uniform
        self.L_inverse = np.linalg.inv(self.L)

        assert abs(np.sum(self.C - self.L.dot(self.L.T))) < 1e-14
        assert abs(
            np.sum(self.S -
                   self.D.dot(self.L.dot(self.L.T.dot(self.D))))) < 1e-14

    def trans_U_to_X(self, u_vars, x_vars):
        z_vars = np.zeros(u_vars.shape)
        self.trans_U_to_Z(u_vars, z_vars)
        self.trans_Z_to_X(z_vars, x_vars)

    def trans_X_to_U(self, x_vars, u_vars):
        z_vars = np.zeros(u_vars.shape)

        self.trans_X_to_Z(x_vars, z_vars)
        self.trans_Z_to_U(z_vars, u_vars)

    def trans_Z_to_X(self, z_vars, x_vars):
        for i in range(self.dim):
            normcdf = self.normal.cdf(z_vars[i])
            scaled_x = self.x[i].ppf(normcdf.reshape(len(normcdf), 1))
            scaled_x = scaled_x.reshape(len(normcdf))
            x_vars[i] = self.lwr + (self.upr - self.lwr) * scaled_x

    def trans_X_to_Z(self, x_vars, z_vars):
        for i in range(self.dim):
            betacdf = self.x[i].cdf(x_vars[i].reshape(len(x_vars[i]), 1))
            betacdf = betacdf.reshape(len(betacdf))
            z_vars[i] = self.normal.ppf(betacdf)

    def trans_Z_to_U(self, z_vars, u_vars):
        # decorrelate the variables
        res = self.L_inverse.dot(self.D_inverse.dot(z_vars - self.M))

        # transform to uniform space
        for i, zi in enumerate(res):
            u_vars[i] = self.normal.cdf(zi)

    def trans_U_to_Z(self, u_vars, z_vars):
        # transform to std normal space
        for i, ui in enumerate(u_vars):
            z_vars[i] = self.normal.ppf(ui)

        # apply the correlation
        res = self.D.dot(self.L.dot(z_vars)) + self.M

        # transform to space of correlated normal
        for i, zi in enumerate(res):
            z_vars[i] = zi
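
A round-trip usage sketch, assuming the pysgpp uq Dist classes accept NumPy arrays the way the methods above imply. With the default sample_type the marginals are Beta(5, 10) and the correlation matrix is the identity, so trans_X_to_U should invert trans_U_to_X:

import numpy as np

n = 100
data = np.random.rand(2, n)            # only data.shape[0], the dimension, is used
nataf = NatafTransformation(data)

u = 0.05 + 0.9 * np.random.rand(2, n)  # uniform samples, kept away from 0 and 1
x = np.zeros(u.shape)
u_back = np.zeros(u.shape)
nataf.trans_U_to_X(u, x)               # uniform -> (correlated) beta space
nataf.trans_X_to_U(x, u_back)          # and back again
print(np.max(np.abs(u - u_back)))      # should be close to zero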
Example #5
    def withBetaDistribution(self, p, q, accLevel=0., width=1.):
        self._dist = Beta(p, q, accLevel, width)
        return self
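
The method is a fluent setter: it stores the distribution and returns self so calls can chain. A hypothetical usage, where builder stands for whatever parameter-builder object carries this method in the source:

builder = builder.withBetaDistribution(5., 10., accLevel=0., width=1.)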
Example #6
    parser = ArgumentParser(description='Get a program and run it with input')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('--level',
                        default=2,
                        type=int,
                        help="minimum level of regular grids")
    parser.add_argument('--marginalType',
                        default="beta",
                        type=str,
                        help="marginals")
    args = parser.parse_args()

    if args.marginalType == "uniform":
        marginal = Uniform(0, 1)
    elif args.marginalType == "beta":
        marginal = Beta(5, 10)
    else:
        marginal = Normal(0.5, 0.1, 0, 1)

    # plot the joint pdf (numDims and the plotting helpers are defined at module level in the original script)
    dist = J([marginal] * numDims)
    fig = plt.figure()
    plotDensity2d(dist)
    savefig(fig, "/tmp/%s" % (args.marginalType, ))
    plt.close(fig)

    w = pysgpp.singleFunc(marginal.pdf)

    grids = pysgpp.AbstractPointHierarchyVector()
    grids.push_back(pysgpp.CombiHierarchies.linearLeja(w))
    grids.push_back(pysgpp.CombiHierarchies.linearLeja(w))
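
The three marginal choices above are a uniform on [0, 1], a Beta(5, 10), and a normal with mean 0.5 and standard deviation 0.1 truncated to [0, 1]. They can be visualized without pysgpp; a scipy/matplotlib sketch:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import uniform, beta, truncnorm

x = np.linspace(0, 1, 500)
mu, sigma = 0.5, 0.1
a, b = (0 - mu) / sigma, (1 - mu) / sigma  # truncation bounds in standard units
plt.plot(x, uniform(0, 1).pdf(x), label="uniform")
plt.plot(x, beta(5, 10).pdf(x), label="beta")
plt.plot(x, truncnorm(a, b, loc=mu, scale=sigma).pdf(x), label="normal")
plt.legend()
plt.show()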
Example #7
from pysgpp.extensions.datadriven.uq.dists import Beta
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad

c = 1

B = Beta(3, 2, 0, c)

X = np.linspace(0, c, 1000)
Y = [B.pdf(xi) for xi in X]

samples = B.rvs(20000)

plt.hist(samples, density=True)
plt.plot(X, Y)

print(B.mean(), "~", np.mean(samples))
print(B.var(), "~", np.var(samples))
print(quad(B.pdf, 0, c)[0], "~", 1)

plt.show()
class NatafTransformation(object):

    def __init__(self, data, sample_type=None, dist=None):
        # assumption: GaussianKDE and createOperationDensityMargTo1DKDE come from the compiled pysgpp module
        from pysgpp import createOperationDensityMargTo1DKDE, GaussianKDE
        from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta, Normal, GaussianKDEDist

        # fix stochastic setting
        self.alpha, self.beta = 5., 10.
        self.lwr, self.upr = 0., 1.
        self.normal = Normal(0, 1, -2, 2)
        self.uniform = Uniform(self.lwr, self.upr)
        self.b = Beta(self.alpha, self.beta, self.lwr, self.upr)
        self.dim = data.shape[0]

        if sample_type == 'cbeta':
            # marginalize the density
            opMar = createOperationDensityMargTo1DKDE(dist.dist)
            kdex = GaussianKDE()
            opMar.margToDimX(kdex, 0)
            kdey = GaussianKDE()
            opMar.margToDimX(kdey, 1)

            # set the mean vector and the correlation matrix
            self.x = [GaussianKDEDist(kdex.getSamples().array()),
                      GaussianKDEDist(kdey.getSamples().array())]
            self.M = np.array([[kdex.mean(), kdey.mean()]]).T
            self.S = dist.corrcoeff()
        else:
            self.x = [self.b, self.b]
            self.M = np.array([[self.b.mean(), self.b.mean()]]).T
            self.S = np.array([[1., 0.],
                               [0., 1.]])

        # compute the correlation matrix from the covariance matrix
        # this is used to transform the results back to the original space
        self.D = np.diag(np.sqrt(np.diag(self.S)))
        # the inverse of D rescales each component by the reciprocal of its standard deviation
        self.D_inverse = np.diag(1. / np.sqrt(np.diag(self.S)))
        self.C = self.D_inverse.dot(self.S.dot(self.D_inverse))

#         fig = plt.figure()
#         plotDensity1d(self.x[0])
#         plotDensity1d(self.b)
#         fig.show()
#
#         fig = plt.figure()
#         plotDensity1d(self.x[1])
#         plotDensity1d(self.b)
#         fig.show()

        # compute the Cholesky decomposition of the correlation matrix
        self.L = np.linalg.cholesky(self.C)

        # adjust it according to [Lu ...]
        # nothing needs to be done for uniform <--> uniform
        self.L_inverse = np.linalg.inv(self.L)

        assert abs(np.sum(self.C - self.L.dot(self.L.T))) < 1e-14
        assert abs(np.sum(self.S - self.D.dot(self.L.dot(self.L.T.dot(self.D))))) < 1e-14

    def trans_U_to_X(self, u_vars, x_vars):
        z_vars = np.zeros(u_vars.shape)
        self.trans_U_to_Z(u_vars, z_vars)
        self.trans_Z_to_X(z_vars, x_vars)

    def trans_X_to_U(self, x_vars, u_vars):
        z_vars = np.zeros(u_vars.shape)

        self.trans_X_to_Z(x_vars, z_vars)
        self.trans_Z_to_U(z_vars, u_vars)

    def trans_Z_to_X(self, z_vars, x_vars):
        for i in range(self.dim):
            normcdf = self.normal.cdf(z_vars[i])
            scaled_x = self.x[i].ppf(normcdf.reshape(len(normcdf), 1))
            scaled_x = scaled_x.reshape(len(normcdf))
            x_vars[i] = self.lwr + (self.upr - self.lwr) * scaled_x

    def trans_X_to_Z(self, x_vars, z_vars):
        for i in range(self.dim):
            betacdf = self.x[i].cdf(x_vars[i].reshape(len(x_vars[i]), 1))
            betacdf = betacdf.reshape(len(betacdf))
            z_vars[i] = self.normal.ppf(betacdf)

    def trans_Z_to_U(self, z_vars, u_vars):
        # decorrelate the variables
        res = self.L_inverse.dot(self.D_inverse.dot(z_vars - self.M))

        # transform to uniform space
        for i, zi in enumerate(res):
            u_vars[i] = self.normal.cdf(zi)

    def trans_U_to_Z(self, u_vars, z_vars):
        # transform to std normal space
        for i, ui in enumerate(u_vars):
            z_vars[i] = self.normal.ppf(ui)

        # apply the correlation
        res = self.D.dot(self.L.dot(z_vars)) + self.M

        # transform to space of correlated normal
        for i, zi in enumerate(res):
            z_vars[i] = zi
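
With the identity correlation matrix of the default branch, trans_U_to_Z reduces to z = Phi^-1(u) + M and trans_Z_to_U to u = Phi(z - M), so the two maps invert each other exactly. A standalone sketch of that round trip, substituting scipy's untruncated standard normal for pysgpp's Normal(0, 1, -2, 2):

import numpy as np
from scipy.stats import norm

M = np.array([[1.0 / 3.0], [1.0 / 3.0]])  # mean of the Beta(5, 10) marginals
u = np.random.rand(2, 1000)

z = norm.ppf(u) + M                # trans_U_to_Z with D = L = I
u_back = norm.cdf(z - M)           # trans_Z_to_U with D = L = I

print(np.max(np.abs(u - u_back)))  # ~1e-16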