Example #1
    def __init__(self, mu, sigma, a, b):
        super(Lognormal, self).__init__()

        self.__mu = mu
        self.__sigma = sigma
        self._dist = lognorm(sigma, scale=mu)

        self.__a = a
        self.__b = b
        self.__linearTrans = LinearTransformation(self._dist.cdf(self.__a),
                                                  self._dist.cdf(self.__b))
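All of these examples rely on the same contract for LinearTransformation(lower, upper): unitToProbabilistic maps the unit interval onto [lower, upper], probabilisticToUnit inverts that map, and vol() returns the interval length. The stand-in below is a minimal, pure-Python sketch of that assumed contract, not the pysgpp implementation.

# Illustrative stand-in for the LinearTransformation contract assumed in
# these examples; not the pysgpp implementation.
class AffineUnitTransformation:
    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

    def unitToProbabilistic(self, x):
        # [0, 1] -> [lower, upper]
        return self.lower + x * (self.upper - self.lower)

    def probabilisticToUnit(self, x):
        # [lower, upper] -> [0, 1]
        return (x - self.lower) / (self.upper - self.lower)

    def vol(self):
        # length of the target interval (Jacobian of the affine map)
        return self.upper - self.lower


trans = AffineUnitTransformation(0.25, 0.75)
assert abs(trans.unitToProbabilistic(0.5) - 0.5) < 1e-12
assert abs(trans.probabilisticToUnit(0.75) - 1.0) < 1e-12
assert abs(trans.vol() - 0.5) < 1e-12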
Example #2
    def setUp(self):
        # task: interpolate v(x) = f_N(x) * p(x)
        def f(x):
            return np.prod([4 * xi * (1 - xi) * np.sin(xi) for xi in x])
#             return np.prod([16 * (xi * (1 - xi)) ** 2 for xi in x])

        self.f = f
        #         self.U = J([Uniform(0, 1), Uniform(0, 1)])
        #         self.W = J([TNormal(0.5, 0.4, 0, 1), TNormal(0.5, 0.4, 0, 1)])
        self.T = JointTransformation()
        self.T.add(LinearTransformation(0, 1))
        self.T.add(LinearTransformation(0, 1))
        # self.p = [0.4, 0.7]
        self.p = [0.4]
Example #3
    def computeLinearTransformation(self, bounds):
        if len(bounds.shape) == 1:
            bounds = np.array([bounds])
        if bounds.shape[1] != 2:
            raise AttributeError("EstimatedDist - bounds have the wrong shape")

        # init linear transformation
        trans = JointTransformation()
        for idim in range(bounds.shape[0]):
            trans.add(LinearTransformation(bounds[idim, 0], bounds[idim, 1]))

        return trans
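A small, NumPy-only illustration of the bounds layout this helper expects (the variable names are made up): a flat [lower, upper] pair is promoted to a (1, 2) array, and a (d, 2) array yields one (lower, upper) pair, and hence one LinearTransformation, per dimension.

import numpy as np

# Toy illustration of the expected bounds layout; names are hypothetical.
bounds = np.array([0.0, 1.0])            # one dimension given as a flat pair
if len(bounds.shape) == 1:
    bounds = np.array([bounds])          # promoted to shape (1, 2)
assert bounds.shape == (1, 2)

bounds2d = np.array([[0.0, 1.0],         # dimension 0
                     [-1.0, 3.0]])       # dimension 1
intervals = [(bounds2d[idim, 0], bounds2d[idim, 1])
             for idim in range(bounds2d.shape[0])]
# one LinearTransformation(lower, upper) would be added per (lower, upper) pair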
Example #4
    def test_2DNormalDist_variance(self):
        # prepare data
        U = dists.J(
            [dists.Normal(2.0, .5, -1, 4),
             dists.Normal(1.0, .5, -1, 3)])
        #         U = dists.J([dists.Normal(0.5, .5, -1, 2),
        #                      dists.Normal(0.5, .4, -1, 2)])

        # define linear transformation
        trans = JointTransformation()
        for a, b in U.getBounds():
            trans.add(LinearTransformation(a, b))

        # get a sparse grid approximation
        grid = Grid.createPolyGrid(U.getDim(), 10)
        grid.getGenerator().regular(5)
        gs = grid.getStorage()

        # allocate coordinate vector and nodal value storage
        p = DataVector(gs.getDimension())
        nodalValues = np.ndarray(gs.getSize())

        # set function values in alpha
        for i in range(gs.getSize()):
            gs.getPoint(i).getStandardCoordinates(p)
            nodalValues[i] = U.pdf(trans.unitToProbabilistic(p.array()))

        # hierarchize
        alpha = hierarchize(grid, nodalValues)

        #         # make positive
        #         alpha_vec = DataVector(alpha)
        #         createOperationMakePositive().makePositive(grid, alpha_vec)
        #         alpha = alpha_vec.array()

        dist = SGDEdist(grid, alpha, bounds=U.getBounds())

        fig = plt.figure()
        plotDensity2d(U)
        fig.show()

        fig = plt.figure()
        plotSG2d(dist.grid,
                 dist.alpha,
                 addContour=True,
                 show_negative=True,
                 show_grid_points=True)
        fig.show()

        print("2d: mean = %g ~ %g" % (U.mean(), dist.mean()))
        print("2d: var = %g ~ %g" % (U.var(), dist.var()))
        plt.show()
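Assuming dists.Normal(mu, sigma, a, b) denotes a normal distribution truncated to [a, b], the printed comparison can be sanity-checked against scipy.stats.truncnorm, which takes the bounds in standardized form; this is only a reference sketch, not part of the test.

# Hedged cross-check with scipy (assumes dists.Normal(mu, sigma, a, b) is a
# normal truncated to [a, b]); truncnorm expects standardized bounds.
from scipy.stats import truncnorm

mu, sigma, a, b = 2.0, 0.5, -1.0, 4.0
ref = truncnorm((a - mu) / sigma, (b - mu) / sigma, loc=mu, scale=sigma)
print(ref.mean(), ref.var())   # reference moments for the test's first marginal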
Example #5
    def _extractPDFforMomentEstimation(self, U, T):
        dists = []
        jointTrans = []
        vol = 1.
        # check if importance sampling has been used for some parameters
        for i, trans in enumerate(T.getTransformations()):
            # if this is the case replace them by a uniform distribution
            if isinstance(trans, RosenblattTransformation):
                for _ in range(trans.getSize()):
                    dists.append(Uniform(0, 1))
                    jointTrans.append(LinearTransformation(0.0, 1.0))
            else:
                vol *= trans.vol()
                dists.append(U.getDistributions()[i])
                jointTrans.append(trans)
        return vol, J(dists), jointTrans
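The vol factor collected here is the Jacobian of the change of variables to the unit cube: an integral of f times a density p over [a, b]^d equals vol times the integral of the transformed integrand over [0, 1]^d. A toy, NumPy-only Monte Carlo sketch of that identity (integrand and density made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
a = np.array([0.0, -1.0])
b = np.array([1.0, 3.0])
vol = np.prod(b - a)                      # plays the role of trans.vol()

def f(x):                                 # toy integrand
    return x[0] + x[1]

def pdf(x):                               # toy density: uniform on [a, b]^2
    return 1.0 / vol

u = rng.random((200000, 2))               # samples in the unit square
x = a + u * (b - a)                       # unitToProbabilistic, per dimension
values = np.array([f(xi) * pdf(xi) for xi in x])
print(vol * values.mean())                # int f * p dx, roughly 1.5 here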
Example #6
    def __extractPDFforMomentEstimation(self, U, T, dd):
        dists = U.getDistributions()
        jointTrans = JointTransformation()
        vol = 1.
        # check if importance sampling has been used for some parameters
        for i, trans in enumerate(T.getTransformations()):
            # if this is the case replace them by a uniform distribution
            if isinstance(trans, InverseCDFTransformation):
                dists[i] = Uniform(0, 1)
                jointTrans.add(LinearTransformation(0.0, 1.0))
            else:
                jointTrans.add(trans)
                if i in dd:
                    vol *= trans.vol()

        return vol, J(dists), jointTrans
Example #7
    def test_1DNormalDist_variance(self):
        # prepare data
        U = dists.Normal(1, 2, -8, 8)
        #         U = dists.Normal(0.5, .2, 0, 1)

        # define linear transformation
        trans = JointTransformation()
        a, b = U.getBounds()
        trans.add(LinearTransformation(a, b))

        # get a sparse grid approximation
        grid = Grid.createPolyGrid(U.getDim(), 10)
        grid.getGenerator().regular(5)
        gs = grid.getStorage()

        # allocate coordinate vector and nodal value storage
        p = DataVector(gs.getDimension())
        nodalValues = np.ndarray(gs.getSize())

        # set function values in alpha
        for i in range(gs.getSize()):
            gs.getPoint(i).getStandardCoordinates(p)
            nodalValues[i] = U.pdf(trans.unitToProbabilistic(p.array()))

        # hierarchize
        alpha = hierarchize(grid, nodalValues)
        dist = SGDEdist(grid, alpha, bounds=U.getBounds())

        fig = plt.figure()
        plotDensity1d(U,
                      alpha_value=0.1,
                      mean_label=r"$\mathbb{E}$",
                      interval_label=r"$\alpha=0.1$")
        fig.show()

        fig = plt.figure()
        plotDensity1d(dist,
                      alpha_value=0.1,
                      mean_label=r"$\mathbb{E}$",
                      interval_label=r"$\alpha=0.1$")
        fig.show()

        print("1d: mean = %g ~ %g" % (U.mean(), dist.mean()))
        print("1d: var = %g ~ %g" % (U.var(), dist.var()))
        plt.show()
Example #8
def discretizeFunction(f, bounds, level=2, hasBorder=False, *args, **kws):
    # define linear transformation to the unit hyper cube
    T = JointTransformation()
    for xlim in bounds:
        T.add(LinearTransformation(xlim[0], xlim[1]))

    # create grid
    dim = len(bounds)

    # create adequate grid
    if hasBorder:
        grid = Grid.createLinearBoundaryGrid(dim)
    else:
        grid = Grid.createLinearGrid(dim)

    # init storage
    grid.createGridGenerator().regular(level)
    gs = grid.getStorage()

    # discretize on given level
    p = DataVector(dim)
    nodalValues = DataVector(gs.size())
    for i in range(gs.size()):
        gs.get(i).getCoords(p)
        # transform to the right space
        q = T.unitToProbabilistic(p.array())
        # apply the given function
        nodalValues[i] = float(f(q))

    # hierarchize
    alpha = hierarchize(grid, nodalValues)

    # estimate the l2 error
    err = estimateDiscreteL2Error(grid, alpha, f)

    # TODO: adaptive refinement
    return grid, alpha, err
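The core of the evaluation loop above is: take each grid point in the unit hyper-cube, push it through the per-dimension affine maps, and evaluate f there. A hedged, NumPy-only sketch of that pattern, with the sparse grid replaced by a small full tensor grid purely for illustration:

import itertools
import numpy as np

bounds = np.array([[0.0, 1.0], [-1.0, 3.0]])

def f(q):                                   # toy integrand
    return float(np.exp(-np.sum(q ** 2)))

unit_1d = np.linspace(0.0, 1.0, 5)          # stand-in for the unit grid points
nodalValues = []
for p in itertools.product(unit_1d, repeat=bounds.shape[0]):
    q = bounds[:, 0] + np.array(p) * (bounds[:, 1] - bounds[:, 0])
    nodalValues.append(f(q))                # values that would be hierarchized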
Example #9
    def computeBilinearFormEntry(self, gs, gpi, basisi, gpj, basisj, d):
        # compute the entry dimension by dimension
        ans = 1
        err = 0.

        # interpolating 1d sparse grid
        ngrid = Grid.createPolyBoundaryGrid(1, 2)
        ngrid.getGenerator().regular(2)
        ngs = ngrid.getStorage()
        nodalValues = DataVector(ngs.getSize())

        for d in range(gpi.getDimension()):
            # get level index
            lid, iid = gpi.getLevel(d), gpi.getIndex(d)
            ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

            # compute left and right boundary of the support of both
            # basis functions
            xlowi, xhighi = getBoundsOfSupport(gs, lid, iid)
            xlowj, xhighj = getBoundsOfSupport(gs, ljd, ijd)

            xlow = max(xlowi, xlowj)
            xhigh = min(xhighi, xhighj)

            # same level, different index
            if lid == ljd and iid != ijd and lid > 0:
                return 0., 0.
            # the support does not overlap
            elif lid != ljd and xlow >= xhigh:
                return 0., 0.
            else:
                # ----------------------------------------------------
                # do the 1d interpolation ...
                # define transformation function
                T = LinearTransformation(xlow, xhigh)
                for k in range(ngs.getSize()):
                    x = ngs.getCoordinate(ngs.getPoint(k), 0)
                    x = T.unitToProbabilistic(x)
                    nodalValues[k] = basisi.eval(lid, iid, x) * \
                        basisj.eval(ljd, ijd, x)
                # ... by hierarchization
                v = hierarchize(ngrid, nodalValues)

                # discretize the following function
                def f(x, y):
                    xp = T.unitToProbabilistic(x)
                    return float(y * self._U[d].pdf(xp))

                # sparse grid quadrature
                g, w, err1d = discretize(ngrid,
                                         v,
                                         f,
                                         refnums=0,
                                         level=5,
                                         useDiscreteL2Error=False)
                s = T.vol() * doQuadrature(g, w)
                #                     fig = plt.figure()
                #                     plotSG1d(ngrid, v)
                #                     x = np.linspace(xlow, ub, 100)
                #                     plt.plot(np.linspace(0, 1, 100), U[d].pdf(x))
                #                     fig.show()
                #                     fig = plt.figure()
                #                     plotSG1d(g, w)
                #                     x = np.linspace(0, 1, 100)
                #                     plt.plot(x,
                #                              [evalSGFunction(ngrid, v, DataVector([xi])) * U[d].pdf(T.unitToProbabilistic(xi)) for xi in x])
                #                     fig.show()
                #                     plt.show()
                # compute the integral of it
                # ----------------------------------------------------
                ans *= s
                err += err1d[1]

        return ans, err
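The early exits above depend on the supports of the one-dimensional basis functions. Assuming the usual dyadic supports [(i - 1) / 2^l, (i + 1) / 2^l] of a level-l, index-i function, the overlap test can be sketched in plain Python (this mirrors getBoundsOfSupport only conceptually):

def support(level, index):
    # assumed dyadic support of a level/index pair; illustrative only
    h = 1.0 / (1 << level)
    return (index - 1) * h, (index + 1) * h

xlowi, xhighi = support(2, 1)      # (0.0, 0.5)
xlowj, xhighj = support(3, 5)      # (0.5, 0.75)
xlow = max(xlowi, xlowj)
xhigh = min(xhighi, xhighj)
print(xlow >= xhigh)               # True: supports do not overlap, entry is 0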
Example #10
class Lognormal(Dist):
    """
    The Log-normal distribution
    """
    def __init__(self, mu, sigma, a, b):
        super(Lognormal, self).__init__()

        self.__mu = mu
        self.__sigma = sigma
        self._dist = lognorm(sigma, scale=mu)

        self.__a = a
        self.__b = b
        self.__linearTrans = LinearTransformation(self._dist.cdf(self.__a),
                                                  self._dist.cdf(self.__b))

    @classmethod
    def by_range(cls, *args, **kws):
        """
        Constructor given an interval
        """
        return cls(*args, **kws)

    @classmethod
    def by_alpha(cls, mu, sigma, alpha, *args, **kws):
        """
        Constructor given a confidence value
        @param mu: expectation value
        @param sigma: standard deviation
        @param alpha: significance level
        """
        U = lognorm(sigma, scale=mu)
        a = U.ppf(alpha / 2.)
        b = U.ppf(1. - alpha / 2.)

        return cls(mu, sigma, a=a, b=b, *args, **kws)

    def pdf(self, x):
        if self.__a <= x <= self.__b:
            return self._dist.pdf(x)
        else:
            return 0.0

    def cdf(self, x):
        if x < self.__a:
            return 0.0
        elif x > self.__b:
            return 1.0
        else:
            x_unit = self._dist.cdf(x)
            return self.__linearTrans.probabilisticToUnit(x_unit)

    def ppf(self, x):
        x_prob = self.__linearTrans.unitToProbabilistic(x)
        return self._dist.ppf(x_prob)

    def mean(self):
        return self.__mu

    def var(self):
        return self.__sigma**2

    def std(self):
        return self.__sigma

    def rvs(self, n=1):
        samples = np.zeros(n)
        i = 0
        while i < n:
            newSamples = self._dist.rvs(n - i)
            # check range
            for sample in newSamples:
                if self.__a <= sample <= self.__b:
                    samples[i] = sample
                    i += 1
        return samples

    def getBounds(self):
        return np.array([self.__a, self.__b])

    def getDim(self):
        return 1

    def __str__(self):
        return "LogNorm(%g, %g, %g, %g)" % (self.__mu, self.__sigma, self.__a,
                                            self.__b)

    def toJson(self):
        serializationString = '"module" : "' + \
                              self.__module__ + '",\n'

        for attrName in [
                "_Lognormal__mu", "_Lognormal__sigma", "_Lognormal__a",
                "_Lognormal__b"
        ]:
            attrValue = self.__getattribute__(attrName)
            serializationString += ju.parseAttribute(attrValue, attrName)

        s = serializationString.rstrip(",\n")

        return "{" + s + "}"

    @classmethod
    def fromJson(cls, jsonObject):
        """
        Restores the Lognormal object from the json object with its
        attributes.
        @param jsonObject: json object
        @return: the restored Lognormal object
        """
        # restore surpluses
        key = '_Lognormal__mu'
        if key in jsonObject:
            mu = float(jsonObject[key])

        key = '_Lognormal__sigma'
        if key in jsonObject:
            sigma = float(jsonObject[key])

        key = '_Lognormal__a'
        if key in jsonObject:
            a = float(jsonObject[key])

        key = '_Lognormal__b'
        if key in jsonObject:
            b = float(jsonObject[key])

        return Lognormal(mu, sigma, a, b)

    def computeBilinearFormEntry(self, gpi, basisi, gpj, basisj):
        # check if this entry already exists
        for key in [self.getKey(gpi, gpj), self.getKey(gpj, gpi)]:
            if key in self._map:
                return self._map[key]

        # if not, compute it
        ans = 1
        err = 0.

        # interpolating 1d sparse grid
        ngrid = Grid.createPolyBoundaryGrid(1, 2)
        ngrid.createGridGenerator().regular(2)
        ngs = ngrid.getStorage()
        nodalValues = DataVector(ngs.size())

        for d in range(gpi.dim()):
            # get level index
            lid, iid = gpi.getLevel(d), gpi.getIndex(d)
            ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

            # compute left and right boundary of the support of both
            # basis functions
            xlowi, xhighi = self.getBounds(lid, iid)
            xlowj, xhighj = self.getBounds(ljd, ijd)

            xlow = max(xlowi, xlowj)
            xhigh = min(xhighi, xhighj)

            # same level, different index
            if lid == ljd and iid != ijd and lid > 0:
                return 0., 0.
            # the support does not overlap
            elif lid != ljd and xlow >= xhigh:
                return 0., 0.
            else:
                # ----------------------------------------------------
                # do the 1d interpolation ...
                # define transformation function
                T = LinearTransformation(xlow, xhigh)
                for k in range(ngs.size()):
                    x = ngs.get(k).getCoord(0)
                    x = T.unitToProbabilistic(x)
                    nodalValues[k] = basisi.eval(lid, iid, x) * \
                        basisj.eval(ljd, ijd, x)
                # ... by hierarchization
                v = hierarchize(ngrid, nodalValues)

                # discretize the following function
                def f(x, y):
                    xp = T.unitToProbabilistic(x)
                    return float(y * self._U[d].pdf(xp))

                # sparse grid quadrature
                g, w, err1d = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                s = T.vol() * doQuadrature(g, w)
#                     fig = plt.figure()
#                     plotSG1d(ngrid, v)
#                     x = np.linspace(xlow, ub, 100)
#                     plt.plot(np.linspace(0, 1, 100), U[d].pdf(x))
#                     fig.show()
#                     fig = plt.figure()
#                     plotSG1d(g, w)
#                     x = np.linspace(0, 1, 100)
#                     plt.plot(x,
#                              [evalSGFunction(ngrid, v, DataVector([xi])) * U[d].pdf(T.unitToProbabilistic(xi)) for xi in x])
#                     fig.show()
#                     plt.show()
                # compute the integral of it
                # ----------------------------------------------------
                ans *= s
                err += err1d[1]

        return ans, err
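A hedged usage sketch of the by_alpha construction of the Lognormal class in this example, built directly on scipy.stats: the truncation bounds are the alpha/2 and 1 - alpha/2 quantiles, and the truncated CDF is the plain CDF rescaled onto [cdf(a), cdf(b)], which is exactly what the LinearTransformation over the CDF values encodes.

from scipy.stats import lognorm

mu, sigma, alpha = 1.0, 0.5, 0.05
U = lognorm(sigma, scale=mu)
a, b = U.ppf(alpha / 2.), U.ppf(1. - alpha / 2.)

def truncated_cdf(x):
    # rescale the CDF onto [cdf(a), cdf(b)], clipped to [0, 1]
    lo, hi = U.cdf(a), U.cdf(b)
    return min(max((U.cdf(x) - lo) / (hi - lo), 0.0), 1.0)

print(a, b, truncated_cdf(mu))     # bounds and CDF value 0.5 at the median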