Example 1
    def uniform(self, a=0, b=1):
        # set distributions of the input parameters
        builder = ParameterBuilder()
        up = builder.defineUncertainParameters()
        for idim in range(self.numDims):
            up.new().isCalled("x%i" % idim).withUniformDistribution(a, b)
        return up.andGetResult()
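
The parameter set returned by andGetResult() is typically consumed through its joint distribution and transformation, as the later examples do. A minimal usage sketch, assuming an instance model of the surrounding class with numDims already set (both are assumptions, not part of the example above):

import numpy as np

# hedged usage sketch for the parameter set built by uniform() above
params = model.uniform(a=0, b=1)                  # model is a hypothetical instance
U = params.getIndependentJointDistribution()      # joint density of the marginals
T = params.getJointTransformation()               # unit hypercube <-> probabilistic space
x_unit = np.array([0.5] * params.getStochasticDim())
density = U.pdf(T.unitToProbabilistic(x_unit))    # evaluate the joint pdf at one point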
Example 2
    def normal(self, mu=0, sigma=1, alpha=0.001):
        # set distributions of the input parameters
        builder = ParameterBuilder()
        up = builder.defineUncertainParameters()
        for idim in range(self.numDims):
            # pass the alpha argument instead of a hard-coded cut-off
            up.new().isCalled("x%i" % idim).withNormalDistribution(mu, sigma, alpha)
        return builder.andGetResult()
Example 3
def buildSin2Params(dist_type):
    dist = generateDistribution(dist_type, alpha=0.01)

    parameterBuilder = ParameterBuilder()
    up = parameterBuilder.defineUncertainParameters()
    up.new().isCalled("x1").withDistribution(dist)

    return parameterBuilder.andGetResult()
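
The helper generateDistribution is not shown in this snippet. A hedged sketch of what such a helper could look like, built only from ParameterBuilder calls that appear in these examples; the supported dist_type strings and the lognormal parameters (0.5, 0.2) are assumptions, and whether withDistribution accepts the returned one-dimensional joint object depends on the library:

def generateDistribution(dist_type, xlim=None, alpha=0.001):
    # hedged sketch of the assumed helper: build a one-parameter set with the
    # requested marginal and return its distribution object
    a, b = xlim if xlim is not None else (0, 1)
    builder = ParameterBuilder()
    up = builder.defineUncertainParameters()
    if dist_type == "uniform":
        up.new().isCalled("x").withUniformDistribution(a, b)
    elif dist_type == "beta":
        up.new().isCalled("x").withBetaDistribution(3, 3, a, b)
    elif dist_type == "lognormal":
        up.new().isCalled("x").withLognormalDistribution(0.5, 0.2, alpha=alpha)
    else:
        raise AttributeError("dist_type '%s' is not covered by this sketch" % dist_type)
    return builder.andGetResult().getIndependentJointDistribution()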
Example 4
def buildBoreholeParams(dist_type):
    boreholeLimits = [[0.05, 0.15], [100, 50000], [63070, 115600], [990, 1110],
                      [63.1, 116], [700, 820], [1120, 1680], [9855, 12045]]
    boreholeParamNames = ["r_w", "r", "T_u", "H_u", "T_l", "H_l", "L", "K_w"]

    parameterBuilder = ParameterBuilder()
    up = parameterBuilder.defineUncertainParameters()

    for k in range(8):
        xlim = boreholeLimits[k]
        dist = generateDistribution(dist_type, xlim)
        up.new().isCalled(boreholeParamNames[k]).withDistribution(dist)

    return parameterBuilder.andGetResult()
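
A short, hedged sketch of how the resulting borehole parameter set might be inspected; only accessors that appear elsewhere in these examples are used, and dist_type is whatever generateDistribution expects:

# hedged usage sketch for the borehole parameter set built above
params = buildBoreholeParams(dist_type)
print(params.getNames())                          # parameter names: r_w, r, T_u, ...
U = params.getIndependentJointDistribution()      # joint density over the eight parameters
T = params.getJointTransformation()               # unit hypercube <-> physical parameter ranges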
Example 5
    def multivariate_normal(self, mu=0, cov=None, a=0, b=1):
        # set distributions of the input parameters
        mu = np.array([mu] * self.numDims)
        if cov is None:
            # use standard values
            diag = np.eye(self.numDims) * 0.005
            offdiag = np.abs(np.eye(self.numDims) - 1) * 0.001
            cov = diag + offdiag
        # define one correlated block over all dimensions
        builder = ParameterBuilder()
        up = builder.defineUncertainParameters()
        names = ", ".join(["x%i" % i for i in range(self.numDims)])
        up.new().isCalled(names).withMultivariateNormalDistribution(
            mu, cov, a, b)
        return builder.andGetResult()
Example 6
    def marginalize(self):
        """
        NOTE: just returns the marginalized active subset of params
        """
        # marginalize the distribution
        margDistList = []
        margTransformations = []
        activeParams = self.activeParams()
        distributions = activeParams.getIndependentJointDistribution().getDistributions()
        transformations = activeParams.getJointTransformation().getTransformations()
        if len(distributions) == len(activeParams):
            return self
        else:
            for dist, trans in zip(distributions, transformations):
                # check if current parameter is independent
                if dist.getDim() == 1:
                    margDistList.append(dist)
                    margTransformations.append(trans)
                else:
                    # marginalize the densities and update the transformations
                    innertrans = trans.getTransformations()
                    for idim in range(dist.getDim()):
                        margDist = dist.marginalizeToDimX(idim)
                        margDistList.append(margDist)
                        # update transformations
                        if isinstance(innertrans[idim],
                                      RosenblattTransformation):
                            margTransformations.append(
                                RosenblattTransformation(margDist))
                        else:
                            a, b = margDist.getBounds()
                            margTransformations.append(
                                LinearTransformation(a, b))

            assert len(margDistList) == len(
                margTransformations) == activeParams.getDim()

            # update the parameter setting
            from pysgpp.extensions.datadriven.uq.parameters.ParameterBuilder import ParameterBuilder
            builder = ParameterBuilder()
            up = builder.defineUncertainParameters()
            for name, dist, trans in zip(activeParams.getNames(), margDistList,
                                         margTransformations):
                up.new().isCalled(name).withDistribution(dist)\
                                       .withTransformation(trans)
            return builder.andGetResult()
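
A hedged sketch of the intended use: for a fully independent parameter set marginalize() returns the parameters unchanged, while a correlated block (such as the multivariate normal from Example 5) is split into one-dimensional marginals with matching transformations; params stands for any such parameter set:

# hedged sketch: split correlated blocks into one-dimensional marginals
margParams = params.marginalize()
margU = margParams.getIndependentJointDistribution()   # product of 1d marginals
margT = margParams.getJointTransformation()            # matching 1d transformations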
Example 7
    def refineGrid(self, grid, knowledge, params=None, qoi="_", refinets=[0]):
        # check if this method is used in the right context
        if grid.getType() not in polyGridTypes + bsplineGridTypes + linearGridTypes:
            raise AttributeError('Grid type %s is not supported' % grid.getType())

        if params is None:
            # define standard uniform params
            uncertainParams = ParameterBuilder().defineUncertainParameters()
            for idim in range(grid.getStorage().getDimension()):
                uncertainParams.new().isCalled("x_%i" % idim) \
                    .withUniformDistribution(0, 1)
            params = uncertainParams.andGetResult()

        # get refinement candidates
        if self.verbose:
            print("compute ranking")
        B = self.candidates(grid, knowledge, params, qoi, refinets)

        # now do the refinement
        return self.__refine(grid, B, simulate=False)
Example 8
    def setUpClass(cls):
        super(MonteCarloStrategyTest, cls).setUpClass()

        builder = ParameterBuilder()
        up = builder.defineUncertainParameters()
        up.new().isCalled('x1').withUniformDistribution(0, 1)
        up.new().isCalled('x2').withUniformDistribution(0, 1)
        cls.params = builder.andGetResult()

        cls.numDims = cls.params.getStochasticDim()

        cls.samples = np.random.random((10000, 1))

        cls.grid = Grid.createPolyGrid(cls.numDims, 2)
        cls.grid.getGenerator().regular(1)
        gs = cls.grid.getStorage()

        # interpolate the test function 16 * (1 - x_0) * (1 - x_1)
        nodalValues = np.zeros(gs.getSize())
        x = DataVector(cls.numDims)
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), x)
            nodalValues[i] = 16 * (1 - x[0]) * (1 - x[1])
        cls.alpha = hierarchize(cls.grid, nodalValues)
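
A hedged follow-up test that evaluates the interpolant built by this fixture; evalSGFunction is used here in the same way as in the later examples (it is assumed to be importable from pysgpp.extensions.datadriven.uq.operations), and the evaluation point is arbitrary:

    # hedged sketch of an additional test method using this fixture
    def test_eval_interpolant(self):
        x = np.array([0.25, 0.75])
        value = evalSGFunction(self.grid, self.alpha, x)   # sparse grid surrogate u(x)
        exact = 16 * (1 - x[0]) * (1 - x[1])               # the interpolated function
        # the level-1 grid only approximates the function away from its grid point
        self.assertTrue(np.isfinite(value - exact))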
Example 9
import numpy as np

from pysgpp.extensions.datadriven.uq.dists.MultivariateNormal import MultivariateNormal
from pysgpp import Grid, DataVector, DataMatrix
from pysgpp.extensions.datadriven.uq.operations import (hierarchize,
                                                        evalSGFunctionMulti)
from pysgpp.extensions.datadriven.uq.operations.forcePositivity import OperationMakePositive
from pysgpp.extensions.datadriven.uq.quadrature import doQuadrature
from pysgpp.extensions.datadriven.uq.parameters.ParameterBuilder import ParameterBuilder
from pysgpp.extensions.datadriven.uq.dists.SGDEdist import SGDEdist

mu = np.array([0.5, 0.5])
cov = np.array([[0.1, 0.04], [0.04, 0.1]]) / 5.

dist = MultivariateNormal(mu, cov, 0, 1)

# setup 2d case
builder = ParameterBuilder()
up = builder.defineUncertainParameters()
up.new().isCalled("x").withLognormalDistribution(0.5, 0.2, alpha=0.001)
up.new().isCalled("y").withBetaDistribution(3, 3, 0, 1)
jointDist = builder.andGetResult().getIndependentJointDistribution()


class myDist(object):
    def pdf(self, x):
        if x[0] < 0.5 and x[1] < 0.5:
            return 1.
        elif x[0] > 0.5 and x[1] > 0.5:
            return -1.
        elif x[0] > 0.5 and x[1] < 0.5:
            return 2.
        else:
Example 10
    def test_variance_opt(self):
        # parameters
        level = 4

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        # mu = np.ones(gridConfig.dim_) * 0.5
        # cov = np.diag(np.ones(gridConfig.dim_) * 0.1 / 10.)
        # dist = MultivariateNormal(mu, cov, 0, 1)  # problems in 3d/l2
        # f = lambda x: dist.pdf(x)
        # two candidate test functions; the second definition overrides the first
        def f(x):
            return np.prod(4 * x * (1 - x))

        def f(x):
            return np.arctan(50 * (x[0] - .35)) + np.pi / 2 \
                + 4 * x[1]**3 + np.exp(x[0] * x[1] - 1)

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withBetaDistribution(3, 3, 0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------

        quad = AnalyticEstimationStrategy()
        mean = quad.mean(grid, alpha, U, T)["value"]
        var = quad.var(grid, alpha, U, T, mean)["value"]

        if self.verbose:
            print("mean: %g" % mean)
            print("var : %g" % var)
            print("-" * 80)

        # drop arbitrary grid points and compute the mean and the variance
        # -> just use leaf nodes for simplicity
        bilinearForm = BilinearGaussQuadratureStrategy(grid.getType())
        bilinearForm.setDistributionAndTransformation(U.getDistributions(),
                                                      T.getTransformations())
        linearForm = LinearGaussQuadratureStrategy(grid.getType())
        linearForm.setDistributionAndTransformation(U.getDistributions(),
                                                    T.getTransformations())

        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = ExpectationValueOptRanking()
        mean_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank mean: %g" % (mean_rank, ))
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = VarianceOptRanking()
        var_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank var:  %g" % (var_rank, ))
        # --------------------------------------------------------------------------
        # remove one grid point and update coefficients
        toBeRemoved = IndexList()
        toBeRemoved.push_back(i)
        ixs = gs.deletePoints(toBeRemoved)
        gpsj = []
        new_alpha = np.ndarray(gs.getSize())
        for j in range(gs.getSize()):
            new_alpha[j] = alpha[ixs[j]]
            gpsj.append(gs.getPoint(j))
        # --------------------------------------------------------------------------
        # compute the mean and the variance of the new grid
        mean_trunc = quad.mean(grid, new_alpha, U, T)["value"]
        var_trunc = quad.var(grid, new_alpha, U, T, mean_trunc)["value"]
        basis = getBasis(grid)

        # compute the covariance
        A, _ = bilinearForm.computeBilinearFormByList(gs, [gpi], basis, gpsj,
                                                      basis)
        b, _ = linearForm.computeLinearFormByList(gs, gpsj, basis)

        mean_uwi_phii = np.dot(new_alpha, A[0, :])
        mean_phii, _ = linearForm.getLinearFormEntry(gs, gpi, basis)
        mean_uwi = np.dot(new_alpha, b)
        cov_uwi_phii = mean_uwi_phii - mean_phii * mean_uwi

        # compute the variance of phi_i
        firstMoment, _ = linearForm.getLinearFormEntry(gs, gpi, basis)
        secondMoment, _ = bilinearForm.getBilinearFormEntry(
            gs, gpi, basis, gpi, basis)
        var_phii = secondMoment - firstMoment**2

        # update the ranking
        var_estimated = var_trunc + alpha[i]**2 * var_phii \
            + 2 * alpha[i] * cov_uwi_phii

        mean_diff = np.abs(mean_trunc - mean)
        var_diff = np.abs(var_trunc - var)

        if self.verbose:
            print("-" * 80)
            print("diff: |var - var_estimated| = %g" %
                  (np.abs(var - var_estimated), ))
            print("diff: |var - var_trunc|     = %g = %g = var opt ranking" %
                  (var_diff, var_rank))
            print("diff: |mean - mean_trunc|   = %g = %g = mean opt ranking" %
                  (mean_diff, mean_rank))

        self.assertTrue(np.abs(var - var_estimated) < 1e-14)
        self.assertTrue(np.abs(mean_diff - mean_rank) < 1e-14)
        self.assertTrue(np.abs(var_diff - var_rank) < 1e-14)
Example 11
    def test_squared(self):
        # parameters
        level = 3

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        def f(x):
            return np.prod(8 * x * (1 - x))

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withUniformDistribution(0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())
        weightedNodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())**2
            weightedNodalValues[i] = f(p.array())**2 * U.pdf(
                T.unitToProbabilistic(p))

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(weightedNodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        weightedAlpha = alpha_vec.array()
        checkInterpolation(grid,
                           weightedAlpha,
                           weightedNodalValues,
                           epsilon=1e-13)
        # --------------------------------------------------------------------------
        # np.random.seed(1234567)

        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)

        gs.getCoordinates(gpi, p)
        print(evalSGFunction(grid, alpha, p.array()))
        print(evalSGFunctionBasedOnParents(grid, alpha, gpi))

        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = SquaredSurplusRanking()
        squared_surplus_rank = ranking.rank(grid, gpi, weightedAlpha, params)
        if self.verbose:
            print("rank squared surplus: %g" % (squared_surplus_rank, ))
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = AnchoredMeanSquaredOptRanking()
        anchored_mean_squared_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank mean squared   : %g" % (anchored_mean_squared_rank, ))
Example 12
    def test_anchored_variance_opt(self):
        # parameters
        level = 4

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        # mu = np.ones(gridConfig.dim_) * 0.5
        # cov = np.diag(np.ones(gridConfig.dim_) * 0.1 / 10.)
        # dist = MultivariateNormal(mu, cov, 0, 1)  # problems in 3d/l2
        # f = lambda x: dist.pdf(x)
        # two candidate test functions; the second definition overrides the first
        def f(x):
            return np.prod(4 * x * (1 - x))

        def f(x):
            return np.arctan(50 * (x[0] - .35)) + np.pi / 2 \
                + 4 * x[1]**3 + np.exp(x[0] * x[1] - 1)

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withBetaDistribution(3, 3, 0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------
        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = AnchoredVarianceOptRanking()
        var_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank anchored var:  %g" % (var_rank, ))
        # --------------------------------------------------------------------------
        # compute the mean and the variance of the new grid
        x = DataVector(gs.getDimension())
        gs.getCoordinates(gpi, x)
        x = x.array()
        uwxi = evalSGFunction(grid, alpha, x) - alpha[i]
        fx = U.pdf(T.unitToProbabilistic(x))

        var_rank_estimated = np.abs(
            (fx - fx**2) * (-alpha[i]**2 - 2 * alpha[i] * uwxi))

        if self.verbose:
            print("rank anchored var:  %g" % (var_rank_estimated, ))

        if self.verbose:
            print("-" * 80)
            print("diff: |var - var_estimated| = %g" %
                  (np.abs(var_rank - var_rank_estimated), ))
Example 13
# Copyright (C) 2008-today The SG++ project
# This file is part of the SG++ project. For conditions of distribution and
# use, please see the copyright notice provided with SG++ or at
# sgpp.sparsegrids.org

import matplotlib.pyplot as plt
import numpy as np
from scipy import fftpack

from pysgpp.extensions.datadriven.uq.parameters.ParameterBuilder import ParameterBuilder
from pysgpp.extensions.datadriven.uq.plot.plot1d import plotFunction1d

builder = ParameterBuilder()
up = builder.defineUncertainParameters()
up.new().isCalled('x').withUniformDistribution(-np.pi, np.pi)
params = builder.andGetResult()


def f(x):
    return np.arctan(x)  # np.arctan(50 * (x - .35)) # + np.exp(x - 1)


class FourierSeries(object):
    def __init__(self, x, y, x_steps=256):
        self.n = y.size // 2
        weights = np.array([1. / (2. * self.n)] +
                           [1. / self.n] * (self.n - 1) +
                           [1. / (2. * self.n)])
        ut = np.fft.rfft(y) * weights
        self.f, self.g = (ut.real, -ut.imag[1:-1])