def generateDistribution(dist_type, xlim):
    if dist_type == "uniform":
        return Uniform(xlim[0], xlim[1])
    elif dist_type == "beta":
        return Beta(5, 4, xlim[0], xlim[1] - xlim[0])
    else:
        raise AttributeError("dist type '%s' unknown" % dist_type)
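A minimal usage sketch for this factory, assuming Uniform and Beta come from pysgpp.extensions.datadriven.uq.dists (the import path used in Example #2 below):

from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta

# uniform marginal on [-1, 1]
dist = generateDistribution("uniform", xlim=[-1, 1])
print(dist.pdf(0.0))  # constant density 1/2 inside [-1, 1]
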
Example #2
    def __init__(self, data, sample_type=None, dist=None):
        import numpy as np
        from pysgpp import createOperationDensityMargTo1DKDE, GaussianKDE
        from pysgpp.extensions.datadriven.uq.dists import Uniform, Beta, SGDEdist, Normal, GaussianKDEDist
        from pysgpp.extensions.datadriven.uq.quadrature.marginalization.marginalization import doMarginalize

        # fix stochastic setting
        self.alpha, self.beta = 5., 10.
        self.lwr, self.upr = 0., 1.
        self.normal = Normal(0, 1, -2, 2)
        self.uniform = Uniform(self.lwr, self.upr)
        self.b = Beta(self.alpha, self.beta, self.lwr, self.upr)
        self.dim = data.shape[0]

        if sample_type == 'cbeta':
            # marginalize the density
            opMar = createOperationDensityMargTo1DKDE(dist.dist)
            kdex = GaussianKDE()
            opMar.margToDimX(kdex, 0)
            kdey = GaussianKDE()
            opMar.margToDimX(kdey, 1)

            # set the mean vector and the correlation matrix
            self.x = [GaussianKDEDist(kdex.getSamples().array()),
                      GaussianKDEDist(kdey.getSamples().array())]
            self.M = np.array([[kdex.mean(), kdey.mean()]]).T
            self.S = dist.corrcoeff()
        else:
            self.x = [self.b, self.b]
            self.M = np.array([[self.b.mean(), self.b.mean()]]).T
            self.S = np.array([[1., 0.],
                               [0., 1.]])

        # compute the correlation matrix from the covariance matrix
        # this is used to transform the results back to the original space
        self.D = np.diag(np.sqrt(np.diag(self.S)))
        # divide the diagonal by the standard deviation of the diagonal elements
        self.D_inverse = np.diag(1. / np.sqrt(np.diag(self.S)))
        self.C = self.D_inverse.dot(self.S.dot(self.D_inverse))

#         fig = plt.figure()
#         plotDensity1d(self.x[0])
#         plotDensity1d(self.b)
#         fig.show()
#
#         fig = plt.figure()
#         plotDensity1d(self.x[1])
#         plotDensity1d(self.b)
#         fig.show()

        # compute cholesky decomposition
        self.L = np.linalg.cholesky(self.C)

        # adjust it according to [Lu ...]: for uniform <--> uniform
        # nothing needs to be done, so L stays unchanged
        self.L_inverse = np.linalg.inv(self.L)

        assert abs(np.sum(self.C - self.L.dot(self.L.T))) < 1e-14
        assert abs(np.sum(self.S - self.D.dot(self.L.dot(self.L.T.dot(self.D))))) < 1e-14
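
The D/C/L computation above is the standard covariance-to-correlation normalization followed by a Cholesky factorization. A self-contained numpy sketch of the same identities, using a made-up covariance matrix S:

import numpy as np

S = np.array([[2.0, 0.6],
              [0.6, 1.0]])                    # example covariance matrix
D = np.diag(np.sqrt(np.diag(S)))              # standard deviations on the diagonal
D_inverse = np.diag(1. / np.sqrt(np.diag(S)))
C = D_inverse.dot(S).dot(D_inverse)           # correlation matrix, unit diagonal
L = np.linalg.cholesky(C)                     # C = L L^T

assert np.allclose(C, L.dot(L.T))
assert np.allclose(S, D.dot(L).dot(L.T).dot(D))  # S = D C D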
Example #3
def generateDistribution(dist_type, xlim=None, alpha=None):
    if dist_type == "uniform":
        return Uniform(xlim[0], xlim[1])
    elif dist_type == "beta":
        return Beta(5, 4, xlim[0], xlim[1] - xlim[0])
    elif dist_type == "lognormal":
        return TLognormal.by_alpha(1e-12, np.exp(-1), alpha=alpha)
    else:
        raise AttributeError("dist type '%s' unknown" % dist_type)
Example #4
    def _extractPDFforMomentEstimation(self, U, T):
        dists = U.getDistributions()
        vol = 1.
        # check if importance sampling has been used for some parameters
        for i, trans in enumerate(T.getTransformations()):
            # if this is the case, replace them by a uniform distribution
            if isinstance(trans, InverseCDFTransformation):
                dists[i] = Uniform(0, 1)
            else:
                vol *= trans.vol()
        return vol, J(dists)
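
Here trans.vol() is the constant Jacobian of each 1D affine transformation that was not replaced, so vol accumulates the volume of the joint image domain. A toy illustration with made-up interval lengths:

# hypothetical 1D maps from [0, 1] onto [-1, 1] and [0, 3]
vols = [2.0, 3.0]
vol = 1.
for v in vols:
    vol *= v
print(vol)  # 6.0, the Jacobian of the joint affine map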
Example #5
    def _extractPDFforMomentEstimation(self, U, T):
        dists = []
        jointTrans = []
        vol = 1.
        # check if importance sampling has been used for some parameters
        for i, trans in enumerate(T.getTransformations()):
            # if this is the case, replace them by a uniform distribution
            if isinstance(trans, RosenblattTransformation):
                for _ in range(trans.getSize()):
                    dists.append(Uniform(0, 1))
                    jointTrans.append(LinearTransformation(0.0, 1.0))
            else:
                vol *= trans.vol()
                dists.append(U.getDistributions()[i])
                jointTrans.append(trans)
        return vol, J(dists), jointTrans
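
Both variants rely on the probability integral transform: pushing a random variable through its own CDF yields a U(0, 1) variable, which is why Rosenblatt- or inverse-CDF-transformed parameters can be replaced by Uniform(0, 1). A quick standalone check of that fact (scipy is an assumption here; the snippets themselves do not use it):

import numpy as np
from scipy.stats import norm

x = norm.rvs(size=100000, random_state=42)  # N(0, 1) samples
u = norm.cdf(x)                             # CDF-transformed samples

# u is (approximately) uniform on [0, 1]
assert abs(np.mean(u) - 0.5) < 1e-2
assert abs(np.var(u) - 1. / 12.) < 1e-2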
Example #6
    def __extractDiscretePDFforMomentEstimation(self, U, T):
        dists = U.getDistributions()
        vol = 1.
        err = 0.
        # check if importance sampling has been used for some parameters
        for i, trans in enumerate(T.getTransformations()):
            # if this is the case replace them by a uniform distribution
            if isinstance(trans, InverseCDFTransformation):
                grid, alpha, erri = Uniform(0, 1).discretize(level=2)
            else:
                vol *= trans.vol()
                grid, alpha, erri = dists[i].discretize(level=10)

            dists[i] = SGDEdist.fromSGFunction(grid, alpha)
            err += erri
        return vol, J(dists), err
Example #7
    def testMarginalEstimationStrategy(self):
        xlim = np.array([[-1, 1], [-1, 1]])
        trans = JointTransformation()
        dists = []
        for idim in range(xlim.shape[0]):
            trans.add(LinearTransformation(xlim[idim, 0], xlim[idim, 1]))
            dists.append(Uniform(xlim[idim, 0], xlim[idim, 1]))
        dist = J(dists)

        def f(x):
            return np.prod([(1 + xi) * (1 - xi) for xi in x])

        def F(x):
            return 1. - x**3 / 3.

        grid, alpha_vec = interpolate(f,
                                      1,
                                      2,
                                      gridType=GridType_Poly,
                                      deg=2,
                                      trans=trans)
        alpha = alpha_vec.array()

        # reference value: E[f] = prod_i (1/2) * int_{-1}^{1} (1 - x_i^2) dx_i = (2/3)^2
        q = (F(1) - F(-1))**2
        q1 = doQuadrature(grid, alpha)
        q2 = AnalyticEstimationStrategy().mean(grid, alpha, dist,
                                               trans)["value"]

        self.assertTrue(abs(q - q1) < 1e-10)
        self.assertTrue(abs(q - q2) < 1e-10)

        ngrid, nalpha, _ = MarginalAnalyticEstimationStrategy().mean(
            grid, alpha, dist, trans, [[0]])

        self.assertTrue(abs(nalpha[0] - 2. / 3.) < 1e-10)

        plotSG3d(grid, alpha)
        plt.figure()
        plotSG1d(ngrid, nalpha)
        plt.show()
Example #8
def example8(dist_type="uniform"):
    operation = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(
        d, func)

    config = pysgpp.OrthogonalPolynomialBasis1DConfiguration()

    if dist_type == "beta":
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_JACOBI
        config.polyParameters.alpha_ = 5
        config.polyParameters.beta_ = 4

        U = J(
            [Beta(config.polyParameters.alpha_, config.polyParameters.beta_)] *
            d)
    else:
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_LEGENDRE
        U = J([Uniform(0, 1)] * d)

    basisFunction = pysgpp.OrthogonalPolynomialBasis1D(config)
    basisFunctions = pysgpp.OrthogonalPolynomialBasis1DVector(d, basisFunction)

    q = 3
    operation.getLevelManager().addRegularLevels(q)
    print("Total function evaluations: %i" % operation.numGridPoints())
    ## compute variance of the interpolant

    surrogateConfig = pysgpp.CombigridSurrogateModelConfiguration()
    surrogateConfig.type = pysgpp.CombigridSurrogateModelsType_POLYNOMIAL_CHAOS_EXPANSION
    surrogateConfig.loadFromCombigridOperation(operation)
    surrogateConfig.basisFunction = basisFunction
    pce = pysgpp.createCombigridSurrogateModel(surrogateConfig)

    n = 10000
    values = [g(pysgpp.DataVector(xi)) for xi in U.rvs(n)]
    print("E(u)   = %g ~ %g" % (np.mean(values), pce.mean()))
    print("Var(u) = %g ~ %g" % (np.var(values), pce.variance()))
Example #9
    def testSettings(self):
        builder = ParameterBuilder()
        dp = builder.defineDeterministicParameters()
        up = builder.defineUncertainParameters()

        # ============================================
        # 1)
        up.new().isCalled('v')\
                .withDistribution(Uniform(0, 1))\
                .withRosenblattTransformation()
        # --------------------------------------------
        # 2)
        up.new().isCalled('density')\
                .withDistribution(Uniform(-1, 1))\
                .hasValue(0.0)
        # --------------------------------------------
        # 3)
        up.new().isCalled('K')\
                .withDistribution(TNormal(0, 1, -3, 2))\
                .hasValue(-3)
        # --------------------------------------------
        # 4)
        up.new().isCalled('theta')\
                .withDistribution(TNormal(0, 1, -2, 2))\
                .withLinearTransformation()
        # --------------------------------------------
        # 5)
        up.new().isCalled('blub')\
                .withUniformDistribution(-1, 1)
        # --------------------------------------------
        # 6)
        dp.new().isCalled('radius').hasValue(2)
        # ============================================

        params = builder.andGetResult()

        # test dimensions
        assert params.getDim() == 6
        assert params.getStochasticDim() == 3
        assert len(params.activeParams()) == 3
        assert params.getStochasticDim() == len(params.activeParams())
        assert params.getDim() - len(params.uncertainParams()) == \
            len(params.deterministicParams())
        assert params.getStochasticDim() == len(params.getDistributions()) - 2

        jsonStr = params.getJointTransformation().toJson()
        jsonObject = json.loads(jsonStr)
        trans = Transformation.fromJson(jsonObject)

        # test transformations
        ap = params.activeParams()
        assert params.getStochasticDim() == len(ap)
        sampler = MCSampler.withNaiveSampleGenerator(params)

        for sample in sampler.nextSamples(100):
            for x in sample.getActiveUnit():
                assert 0 <= x <= 1
            bounds = params.getBounds()
            q = sample.getExpandedProbabilistic()
            for xlim1, xlim2, x in np.vstack((bounds.T, q)).T:
                assert xlim1 <= x <= xlim2

        params.removeParam(0)
        assert params.getStochasticDim() == len(ap) - 1
        sampler = MCSampler.withNaiveSampleGenerator(params)

        for sample in sampler.nextSamples(100):
            for x in sample.getActiveUnit():
                assert 0 <= x <= 1
            bounds = params.getBounds()
            q = sample.getExpandedProbabilistic()
            for xlim1, xlim2, x in np.vstack((bounds.T, q)).T:
                assert xlim1 <= x <= xlim2
Example #10
    def withUniformDistribution(self, a, b):
        self._dist = Uniform(a, b)
        return self
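
This is the fluent setter used by the ParameterBuilder chain in Example #9; a minimal usage sketch with a hypothetical parameter name 'x':

builder = ParameterBuilder()
up = builder.defineUncertainParameters()
up.new().isCalled('x').withUniformDistribution(-1, 1)
params = builder.andGetResult()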
Example #11
if __name__ == "__main__":
    # parse the input arguments
    parser = ArgumentParser(description='Get a program and run it with input')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('--level',
                        default=2,
                        type=int,
                        help="minimum level of regular grids")
    parser.add_argument('--marginalType',
                        default="beta",
                        type=str,
                        help="type of the marginal distribution (uniform, beta or normal)")
    args = parser.parse_args()

    if args.marginalType == "uniform":
        marginal = Uniform(0, 1)
    elif args.marginalType == "beta":
        marginal = Beta(5, 10)
    else:
        marginal = Normal(0.5, 0.1, 0, 1)

    # plot pdf
    dist = J([marginal] * numDims)
    fig = plt.figure()
    plotDensity2d(dist)
    savefig(fig, "/tmp/%s" % (args.marginalType, ))
    plt.close(fig)

    w = pysgpp.singleFunc(marginal.pdf)

    grids = pysgpp.AbstractPointHierarchyVector()