Example #1
    def __discretize(self):
        """
        Discretize squared f to obtain a well-suited grid for all
        further computations.
        """
        def f(p, val):
            q = self.__T.unitToProbabilistic(p)
            return val ** 2 * self.__U.pdf(q)
        grid, _, _ = discretize(self.__grid, self.__alpha, f,
                                refnums=4, deg=1, epsilon=1e-6)

        # hierarchize without pdf
        gs = grid.getStorage()
        nodalValues = DataVector(gs.size())
        p = DataVector(gs.dim())
        for i in xrange(gs.size()):
            gs.get(i).getCoords(p)
            nodalValues[i] = evalSGFunction(self.__grid, self.__alpha, p)

        self.__alpha = hierarchize(grid, nodalValues)
        self.__grid = grid

        err = checkInterpolation(self.__grid, self.__alpha, nodalValues)
        if err is True:
            import pdb; pdb.set_trace()
Example #2
    def __discretize(self):
        """
        Discretize squared f to obtain a well-suited grid for all
        further computations.
        """
        def f(p, val):
            q = self.__T.unitToProbabilistic(p)
            return val**2 * self.__U.pdf(q)

        grid, _, _ = discretize(self.__grid,
                                self.__alpha,
                                f,
                                refnums=4,
                                deg=1,
                                epsilon=1e-6)

        # hierarchize without pdf
        gs = grid.getStorage()
        nodalValues = DataVector(gs.size())
        p = DataVector(gs.dim())
        for i in xrange(gs.size()):
            gs.get(i).getCoords(p)
            nodalValues[i] = evalSGFunction(self.__grid, self.__alpha, p)

        self.__alpha = hierarchize(grid, nodalValues)
        self.__grid = grid

        err = checkInterpolation(self.__grid, self.__alpha, nodalValues)
        if err is True:
            import pdb
            pdb.set_trace()
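Examples #1 and #2 show the same pattern: a callback f(p, val) turns the current sparse grid value at the unit coordinates p into the quantity that should drive the refinement (here the squared value weighted with the pdf), discretize builds a refined grid for it, and the original function is then re-interpolated and hierarchized on that grid. The sketch below isolates this round trip; it assumes grid, alpha, U and T already exist and uses only the helpers that appear in the examples above, so it illustrates the assumed call pattern rather than being a self-contained program.

# Minimal sketch of the discretize/hierarchize round trip from Examples #1-#2.
# Assumptions: `grid`/`alpha` hold the current sparse grid function, `U` is the
# probability distribution and `T` the unit-to-probabilistic transformation.
def refinement_target(p, val):
    # squared function value, weighted with the pdf in probabilistic coordinates
    q = T.unitToProbabilistic(p)
    return val ** 2 * U.pdf(q)

# build a grid that resolves f^2 * pdf well (signature as used above)
new_grid, _, _ = discretize(grid, alpha, refinement_target,
                            refnums=4, deg=1, epsilon=1e-6)

# re-interpolate the *original* function (without the pdf) on the new grid
gs = new_grid.getStorage()
nodalValues = DataVector(gs.size())
p = DataVector(gs.dim())
for i in range(gs.size()):
    gs.get(i).getCoords(p)
    nodalValues[i] = evalSGFunction(grid, alpha, p)
new_alpha = hierarchize(new_grid, nodalValues)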
Example #3
    def estimate(self, vol, grid, alpha, f, U, T):
        r"""
        Extract the expectation of the given sparse grid function,
        which interpolates the product of function value and pdf.

        \int\limits_{[0, 1]^d} f(x) * pdf(x) dx
        """
        # first: discretize f
        fgrid, falpha, discError = discretize(grid, alpha, f, self.__epsilon,
                                              self.__refnums, self.__pointsNum,
                                              self.level, self.__deg, True)
        # extract correct pdf for moment estimation
        vol, W, pdfError = self.__extractDiscretePDFforMomentEstimation(U, T)
        D = T.getTransformations()

        # compute the integral of the product
        gs = fgrid.getStorage()
        acc = DataVector(gs.size())
        acc.setAll(1.)
        tmp = DataVector(gs.size())
        for i, dims in enumerate(W.getTupleIndices()):
            sgdeDist = W[i]
            # accumulate objects needed for computing the bilinear form
            gpsi, basisi = project(fgrid, dims)
            gpsj, basisj = project(sgdeDist.grid, range(len(dims)))
            A = self.__computeMassMatrix(gpsi, basisi, gpsj, basisj, W, D)
            # A = ScipyQuadratureStrategy(W, D).computeBilinearForm(fgrid)
            self.mult(A, sgdeDist.alpha, tmp)
            acc.componentwise_mult(tmp)

        moment = falpha.dotProduct(acc)
        return vol * moment, discError[1] + pdfError
Example #4
    def estimate(self, vol, grid, alpha, f, U, T, dd):
        r"""
        Extract the expectation of the given sparse grid function,
        which interpolates the product of function value and pdf.

        \int\limits_{[0, 1]^d} v(x) dy

        where v(x) := u(x) q(x)
        """
        # extract correct pdf for moment estimation
        vol, W, D = self.__extractPDFforMomentEstimation(U, T, dd)

        # check if there are just uniform distributions given
        if all([isinstance(dist, Uniform) for dist in W.getDistributions()]):
            # for uniformly distributed RVS holds: vol * pdf(x) = 1
            vol = 1
            u = f
        else:
            # interpolate v(x) = f_N^k(x) * pdf(x)
            def u(p, val):
                """
                function to be interpolated
                @param p: coordinates of collocation nodes
                @param val: sparse grid function value at position p
                """
                # extract the parameters we are integrating over
                q = D.unitToProbabilistic(p)
                # compute pdf and take just the dd values
                marginal_pdf = W.pdf(q, marginal=True)[dd]
                return f(p, val) * np.prod(marginal_pdf)

        # discretize the function f on a sparse grid
        n_grid, n_alpha, err = discretize(grid,
                                          alpha,
                                          u,
                                          refnums=self.__refnums,
                                          epsilon=self.__epsilon,
                                          level=self.level,
                                          deg=self.__deg)
        n_alpha.mult(float(vol))

        # Estimate the expectation value
        o_grid, o_alpha, errMarg = doMarginalize(n_grid, n_alpha, dd)

        return o_grid, o_alpha, err[1] + errMarg
Example #5
    def estimate(self, vol, grid, alpha, f, U, T, dd):
        r"""
        Extract the expectation of the given sparse grid function,
        which interpolates the product of function value and pdf.

        \int\limits_{[0, 1]^d} v(x) dy

        where v(x) := u(x) q(x)
        """
        # extract correct pdf for moment estimation
        vol, W = self.__extractPDFforMomentEstimation(U, T, dd)

        # check if there are just uniform distributions given
        if all([isinstance(dist, Uniform) for dist in W.getDistributions()]):
            # for uniformly distributed RVS holds: vol * pdf(x) = 1
            vol = 1
            u = f
        else:
            # interpolate v(x) = f_N^k(x) * pdf(x)
            def u(p, val):
                """
                function to be interpolated
                @param p: coordinates of collocation nodes
                @param val: sparse grid function value at position p
                """
                # extract the parameters we are integrating over
                q = T.unitToProbabilistic(p)
                # compute pdf and take just the dd values
                marginal_pdf = W.pdf(q, marginal=True)[dd]
                return f(p, val) * np.prod(marginal_pdf)

        # discretize the function f on a sparse grid
        n_grid, n_alpha, err = discretize(grid, alpha, u,
                                          refnums=self.__refnums,
                                          epsilon=self.__epsilon,
                                          level=self.level,
                                          deg=self.__deg)
        n_alpha.mult(float(vol))

        # Estimate the expectation value
        o_grid, o_alpha, errMarg = doMarginalize(n_grid, n_alpha, dd)

        return o_grid, o_alpha, err[1] + errMarg
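A note on the shortcut taken in the uniform branch of both variants above: if every marginal in W is a Uniform(a_d, b_d) distribution and vol denotes the volume of the transformed domain (an assumption about the meaning of vol, consistent with it being reset to 1 here), then

    vol * pdf(x) = \prod_d (b_d - a_d) * \prod_d \frac{1}{b_d - a_d} = 1,

so the weighting with the pdf cancels exactly and f can be discretized directly (vol = 1, u = f) instead of building the interpolant of v(x) = u(x) q(x).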
Example #6
    def estimate(self, A, grid, alpha, k, U, T):
        r"""
        Extract the expectation of the given sparse grid function by
        assuming a constant distribution function on the support
        of each node.
        """
        gs = grid.getStorage()

        def f(p):
            val = evalSGFunction(grid, alpha, p)
            return val ** k

        n_grid, n_alpha = discretize(grid, alpha, f, refnums=0)

        # add the density measure
        for i in range(gs.size()):
            p = [gs.getCoordinates(gs.getPoint(i), j) for j in range(gs.getDimension())]
            q = U.pdf(tr.trans(p), marginal=True)
            n_alpha[i] *= prod(q)

        # Estimate the expectation value
        return A * doQuadrature(n_grid, n_alpha)
Example #7
    def estimate(self, A, grid, alpha, k, U, T):
        r"""
        Extract the expectation of the given sparse grid function by
        assuming a constant distribution function on the support
        of each node.
        """
        gs = grid.getStorage()

        def f(p):
            val = evalSGFunction(grid, alpha, p)
            return val ** k

        n_grid, n_alpha = discretize(grid, alpha, f, refnums=0)

        # add the density measure
        for i in xrange(gs.size()):
            p = [gs.get(i).getCoord(j) for j in range(gs.dim())]
            q = U.pdf(tr.trans(p), marginal=True)
            n_alpha[i] *= prod(q)

        # Estimate the expectation value
        return A * doQuadrature(n_grid, n_alpha)
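Both versions of this estimator (newer and older storage API) realize the docstring's assumption of a locally constant density in the same way: the k-th power of the interpolant is discretized without refinement (refnums=0), every coefficient is weighted with the product of the marginal densities at its own grid point, and plain sparse grid quadrature gives the moment. Restated (treating A as the domain volume factor, which is an assumption about that parameter):

    E[u^k] \approx A * doQuadrature(n_grid, n_alpha'),  where  n_alpha'[i] = n_alpha[i] * \prod_d q_d(x_i)

and x_i is the i-th grid point mapped into the probabilistic domain, i.e. the density is frozen at the node of each basis function.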
Example #8
def computeBilinearForm(grid, U):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on the measure U, which is in this case assumed to be a Lebesgue measure.
    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebesgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.getGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    level = DataMatrix(gs.size(), gs.getDimension())
    index = DataMatrix(gs.size(), gs.getDimension())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.getDimension(), dtype='float')

    # run over all rows
    for i in range(gs.size()):
        gpi = gs.getPoint(i)
        # run over all columns
        for j in range(i, gs.size()):
            # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
            gpj = gs.getPoint(j)
            # run over all dimensions
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([((iid - 1) / lid), ((ijd - 1) / ljd)])
                ub = min([((iid + 1) / lid), ((ijd + 1) / ljd)])

                # same level, different index
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))

    return A
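The entries of A factorize over the dimensions because the basis functions are tensor products, and a one-dimensional factor can only be nonzero if the supports of the two hat functions overlap. In the notation of the loop above (a clarifying restatement; the support formula supp phi_{l,i} = [(i-1)*2^{-l}, (i+1)*2^{-l}] is the standard one for the hat basis):

    (A)_{ij} = \prod_{d=1}^{D} \int \phi_{l_i^d, i_i^d}(x) \phi_{l_j^d, i_j^d}(x) dU_d(x),

    lb = max((i_i^d - 1) 2^{-l_i^d}, (i_j^d - 1) 2^{-l_j^d}),
    ub = min((i_i^d + 1) 2^{-l_i^d}, (i_j^d + 1) 2^{-l_j^d}).

The factor is set to zero whenever lb >= ub or both points share a level but not an index. The code writes the bounds as (iid - 1) / lid because the level matrix filled by getLevelIndexArraysForEval apparently stores 2^l rather than l; Example #11 below computes the same bounds explicitly as (iid - 1) * 2 ** -lid with lid = getLevel(d).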
Example #9
def computeBilinearForm(grid, U):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on the measure U, which is in this case assumed to be a Lebesgue measure.
    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebesgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.createGridGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.dim(), dtype='float')

    # run over all rows
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        # run over all columns
        for j in xrange(i, gs.size()):
            # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
            gpj = gs.get(j)
            # run over all dimensions
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])

                # same level, different index
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in xrange(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))

    return A
Example #10
    def estimate(self, vol, grid, alpha, f, U, T):
        r"""
        Extract the expectation of the given sparse grid function,
        which interpolates the product of function value and pdf.

        \int\limits_{[0, 1]^d} f(x) * pdf(x) dx
        """
        # extract correct pdf for moment estimation
        vol, W = self.__extractPDFforMomentEstimation(U, T)

        # check if there are just uniform distributions given
        if all([isinstance(dist, Uniform) for dist in W.getDistributions()]):
            # for uniformly distributed RVS holds: vol * pdf(x) = 1
            vol = 1
            u = f
        else:
            # interpolate u(x) = f_N^k(x) * pdf(x)
            def u(p, val):
                """
                function to be interpolated
                @param p: coordinates of collocation nodes
                @param val: sparse grid function value at position p
                """
                q = W.pdf(T.unitToProbabilistic(p), marginal=True)
                return f(p, val) * np.prod(q)

        # discretize the function f on a sparse grid
#         pdf_grid, pdf_alpha, pdf_err = U.discretize()
#         n_grid, n_alpha, m_err = discretizeProduct(f,
#                                                    grid, alpha,
#                                                    pdf_grid, pdf_alpha,
#                                                    refnums=self.__refnums,
#                                                    epsilon=self.__epsilon)

        n_grid, n_alpha, err = discretize(grid, alpha, u,
                                          refnums=self.__refnums,
                                          pointsNum=self.__pointsNum,
                                          epsilon=self.__epsilon,
                                          level=self.level,
                                          deg=self.__deg)

        moment = vol * doQuadrature(n_grid, n_alpha)

        if abs(moment) > 1e20:
            print moment
            print n_grid.getSize(), len(alpha)
            import pdb; pdb.set_trace()

#         print "-" * 60
#         print evalSGFunction(m_grid, m_alpha, DataVector([0.5, 0.5])), u([0.5, 0.5], None)
#         print evalSGFunction(n_grid, n_alpha, DataVector([0.5, 0.5])), u([0.5, 0.5], None)
#         print "-" * 60
#
#         # do the quadrature on the new grid
#         m_moment = vol * doQuadrature(m_grid, m_alpha)
#
#         print m_moment
#         print n_moment
#
#         import pdb; pdb.set_trace()
        return moment, err[1]
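In this estimator the product u(x) = f(x) * pdf(x) is first interpolated on its own refined sparse grid and the moment is then read off by quadrature on that surrogate, so the returned value approximates (restating the docstring)

    \int\limits_{[0, 1]^d} f(x) * pdf(x) dx  \approx  vol * doQuadrature(n_grid, n_alpha),

with err[1] taken from the error information that discretize returns, as in Example #3.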
Example #11
def computeBF(grid, U, admissibleSet):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on the measure U, which is in this case assumed to be a Lebesgue measure.
    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebesgue measure
    @param admissibleSet: AdmissibleSet
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.getGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.getDimension(), dtype='float')

#     # pre compute basis evaluations
#     basis_eval = {}
#     for li in xrange(1, gs.getMaxLevel() + 1):
#         for i in xrange(1, 2 ** li + 1, 2):
#             # add value with it self
#             x = 2 ** -li * i
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#
#             # left side
#             x = 2 ** -(li + 1) * (2 * i - 1)
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#             # right side
#             x = 2 ** -(li + 1) * (2 * i + 1)
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#
#             # add values for hierarchical lower nodes
#             for lj in xrange(li + 1, gs.getMaxLevel() + 1):
#                 a = 2 ** (lj - li)
#                 j = a * i - a + 1
#                 while j < a * i + a:
#                     # center
#                     x = 2 ** -lj * j
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     # left side
#                     x = 2 ** -(lj + 1) * (2 * j - 1)
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     # right side
#                     x = 2 ** -(lj + 1) * (2 * j + 1)
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     j += 2
#
#     print len(basis_eval)

    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in range(gs.size()):
            # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
            gpj = gs.getPoint(j)
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])

                # same level, different index
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # define transformation function
                    T = LinearTransformation(lb, ub)
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        x = T.unitToProbabilistic(x)
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # discretize the following function
                    def f(x, y):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    # sparse grid quadrature
                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    s[d] = doQuadrature(g, w) * (ub - lb)
#                     fig = plt.figure()
#                     plotSG1d(ngrid, v)
#                     x = np.linspace(xlow, ub, 100)
#                     plt.plot(np.linspace(0, 1, 100), U[d].pdf(x))
#                     fig.show()
#                     fig = plt.figure()
#                     plotSG1d(g, w)
#                     x = np.linspace(0, 1, 100)
#                     plt.plot(x,
#                              [evalSGFunction(ngrid, v, DataVector([xi])) * U[d].pdf(T.unitToProbabilistic(xi)) for xi in x])
#                     fig.show()
#                     plt.show()
                    # compute the integral of it
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            if gs.getSequenceNumber(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
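Compared to Examples #8 and #9, the one-dimensional integrals here are restricted to the overlap interval [lb, ub]: the LinearTransformation maps the unit interval of the auxiliary 1d grid onto [lb, ub], and the factor (ub - lb) is the Jacobian of that substitution. Written out, assuming T(t) = lb + t * (ub - lb):

    \int_{lb}^{ub} \phi_{l_i,i_i}(x) \phi_{l_j,i_j}(x) u_d(x) dx
        = (ub - lb) \int_0^1 \phi_{l_i,i_i}(T(t)) \phi_{l_j,i_j}(T(t)) u_d(T(t)) dt,

which is exactly doQuadrature(g, w) * (ub - lb) once the integrand has been interpolated on ngrid and multiplied with the pdf via discretize.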
Example #12
def computeBF(grid, U, admissibleSet):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on the measure U, which is in this case assumed to be a Lebesgue measure.
    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebesgue measure
    @param admissibleSet: AdmissibleSet
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.createGridGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')

#     # pre compute basis evaluations
#     basis_eval = {}
#     for li in xrange(1, gs.getMaxLevel() + 1):
#         for i in xrange(1, 2 ** li + 1, 2):
#             # add value with it self
#             x = 2 ** -li * i
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#
#             # left side
#             x = 2 ** -(li + 1) * (2 * i - 1)
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#             # right side
#             x = 2 ** -(li + 1) * (2 * i + 1)
#             basis_eval[(li, i, li, i, x)] = basis.eval(li, i, x) * \
#                 basis.eval(li, i, x)
#
#             # add values for hierarchical lower nodes
#             for lj in xrange(li + 1, gs.getMaxLevel() + 1):
#                 a = 2 ** (lj - li)
#                 j = a * i - a + 1
#                 while j < a * i + a:
#                     # center
#                     x = 2 ** -lj * j
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     # left side
#                     x = 2 ** -(lj + 1) * (2 * j - 1)
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     # right side
#                     x = 2 ** -(lj + 1) * (2 * j + 1)
#                     basis_eval[(li, i, lj, j, x)] = basis.eval(li, i, x) * \
#                         basis.eval(lj, j, x)
#                     basis_eval[(lj, j, li, i, x)] = basis_eval[(li, i, lj, j, x)]
#                     j += 2
#
#     print len(basis_eval)

    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in xrange(gs.size()):
            # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])

                # same level, different index
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # define transformation function
                    T = LinearTransformation(lb, ub)
                    for k in xrange(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        x = T.unitToProbabilistic(x)
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # discretize the following function
                    def f(x, y):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    # sparse grid quadrature
                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    s[d] = doQuadrature(g, w) * (ub - lb)
#                     fig = plt.figure()
#                     plotSG1d(ngrid, v)
#                     x = np.linspace(xlow, ub, 100)
#                     plt.plot(np.linspace(0, 1, 100), U[d].pdf(x))
#                     fig.show()
#                     fig = plt.figure()
#                     plotSG1d(g, w)
#                     x = np.linspace(0, 1, 100)
#                     plt.plot(x,
#                              [evalSGFunction(ngrid, v, DataVector([xi])) * U[d].pdf(T.unitToProbabilistic(xi)) for xi in x])
#                     fig.show()
#                     plt.show()
                    # compute the integral of it
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            if gs.seq(gpi) == j:
                b[i] = A.get(i, j)
    return A, b