Example #1
def makePositive(grid, alpha):
    """
    insert full grid points if they are negative and the father
    node is part of the sparse grid
    @param grid:
    @param alpha:
    """
    # copy old sg function
    jgrid = copyGrid(grid)

    # evaluate the sparse grid function at all full grid points
    level = grid.getStorage().getMaxLevel()
    fg = Grid.createLinearGrid(grid.getDimension())
    fg.getGenerator().full(level)

    # copy the old grid and use it as reference
    jgs = jgrid.getStorage()
    fgs = fg.getStorage()

    # run over all results and check where the function value
    # is lower than zero
    cnt = 1
    while True:
        print("run %i: full grid size = %i" % (cnt, fgs.size()))
        gps = []

        # insert those fg points, which are not yet positive
        values = computeNodalValues(fg, grid, alpha)
        for i in range(len(values)):
            gp = fgs.getPoint(i)
            if values[i] < 0 and not jgs.isContaining(gp):
                gps += insertPoint(jgrid, gp)
                gps += insertHierarchicalAncestors(jgrid, gp)

        jgrid.getStorage().recalcLeafProperty()

        # 1. compute nodal values for new grid points
        jnodalValues = computeNodalValues(jgrid, grid, alpha)
        # 2. set the new ones to zero
        jgs = jgrid.getStorage()
        for gp in gps:
            jnodalValues[jgs.getSequenceNumber(gp)] = 0.
        # 3. hierarchize
        jalpha = hierarchize(jgrid, jnodalValues)
        # stop loop if no points have been added
        if len(gps) == 0:
            break
        # 4. reset values for next loop
        grid = copyGrid(jgrid)
        alpha = DataVector(jalpha)
        cnt += 1

    return jgrid, jalpha
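
A minimal usage sketch for makePositive, assuming the pysgpp API and the hierarchize helper used above; the test function and level are placeholders:

from pysgpp import Grid, DataVector

# regular 2d sparse grid of level 3
grid = Grid.createLinearGrid(2)
grid.getGenerator().regular(3)
gs = grid.getStorage()

# interpolate a toy function that takes negative values
nodalValues = DataVector(gs.getSize())
p = DataVector(gs.getDimension())
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), p)
    nodalValues[i] = p[0] * p[1] - 0.25
alpha = hierarchize(grid, nodalValues)

# extend grid and surpluses until the interpolant is non-negative
# at all full grid points
posGrid, posAlpha = makePositive(grid, alpha)
print(posGrid.getSize(), posAlpha.getSize())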
Example #2
    def update(self, grid, alpha, qoi, t, dtype, iteration):
        """
        Update the knowledge
        @param grid: Grid
        @param alpha: DataVector surplus vector
        @param qoi: string quantity of interest
        @param t: float time step
        @param dtype: KnowledgeType
        @param iteration: int iteration number
        """
        # build dictionary
        if iteration not in self.__alphas:
            self.__alphas[iteration] = {}
            self.__grids[iteration] = {}

        if qoi not in self.__alphas[iteration]:
            self.__alphas[iteration][qoi] = {}
            self.__grids[iteration][qoi] = {}

        if dtype not in self.__alphas[iteration][qoi]:
            self.__alphas[iteration][qoi][dtype] = {}

        if t not in self.__alphas[iteration][qoi][dtype]:
            self.__alphas[iteration][qoi][dtype][t] = {}

        # store knowledge
        self.__iteration = iteration
        self.__grids[iteration][qoi] = copyGrid(grid)
        self.__alphas[iteration][qoi][dtype][t] = DataVector(alpha)
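
The nested dictionaries built by update can be read back by walking the keys in the same order; a minimal sketch with hypothetical accessors (not part of the original class):

    def getAlpha(self, iteration, qoi, dtype, t):
        # hypothetical accessor: alphas are keyed by
        # iteration -> qoi -> dtype -> t
        return self.__alphas[iteration][qoi][dtype][t]

    def getGrid(self, iteration, qoi):
        # grids are stored one level higher, per iteration and qoi
        return self.__grids[iteration][qoi]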
Example #3
    def learnDataWithTest(self, dataset=None, *args, **kws):
        if self.verbose:
            print("learning with test (i=%i, gs=%i)" %
                  (self.sampler.getCurrentIterationNumber(),
                   self.sampler.getGrid().getSize()))
        # learn data
        self.learner.grid = self.sampler.getGrid()
        for dtype, values in list(self.dataContainer.items()):
            # do the learning
            for t in np.sort(list(values.keys())):
                dataContainer = values[t]
                if self.verbose:
                    print(("t = %g, " % t, ))
                sys.stdout.flush()
                if dataContainer is not None:
                    # learn data, if there is any available
                    self.learner.dataContainer = dataContainer
                    self.learner.learnDataWithTest(dtype=dtype)

                    # update the knowledge
                    self.knowledge.update(
                        copyGrid(self.learner.grid), self.learner.alpha,
                        self._qoi, t, dtype,
                        self.sampler.getCurrentIterationNumber() - 1)

                    # update results
                    self.stats.updateResults(dtype, t, self.learner)

            if self.verbose:
                print()
Example #4
    def learnDataWithoutTest(self, *args, **kws):
        # learn data
        if self.verbose:
            print("learning (i=%i, gs=%i, type=%s)" %
                  (self.sampler.getCurrentIterationNumber(),
                   self.sampler.getGrid().getSize(),
                   self.sampler.getGrid().getTypeAsString()))
        self.learner.grid = self.sampler.getGrid()
        for dtype, values in list(self.dataContainer.items()):
            if self.verbose:
                print(KnowledgeTypes.toString(dtype))
            # do the learning
            for t, dataContainer in list(values.items()):
                if self.verbose:
                    print(("t = %g, " % t, ))
                sys.stdout.flush()
                if dataContainer is not None:
                    # learn data, if there is any available
                    self.learner.dataContainer = dataContainer
                    self.learner.learnData()

                    # update the knowledge
                    self.knowledge.update(
                        copyGrid(self.learner.grid), self.learner.alpha,
                        self._qoi, t, dtype,
                        self.sampler.getCurrentIterationNumber() - 1)

                    # update results
                    self.stats.updateResults(dtype, t, self.learner)

            if self.verbose:
                print()
Example #6
def join(grid1, grid2, *args, **kws):
    """
    Join two grids, which are not of the same dimensionality. The grid
    of lower dimensionality is extended to the larger one by adding
    a full grid resolution in the new directions.
    @param grid1: Grid, sparse grid
    @param grid2: Grid, sparse grid
    @return: Grid, joined sparse grid of dimensionality max(dim1, dim2) with
    basis of grid2
    """
    dim1 = grid1.getDimension()
    dim2 = grid2.getDimension()

    if dim1 < dim2:
        grid1 = extend_grid(grid1, dim2 - dim1, *args, **kws)
    elif dim2 < dim1:
        grid2 = extend_grid(grid2, dim1 - dim2, *args, **kws)
    else:
        grid1 = copyGrid(grid1, *args, **kws)
        grid2 = copyGrid(grid2, *args, **kws)

    gs1 = grid1.getStorage()
    gs2 = grid2.getStorage()

    # join grid points: copy all the grid points from grid 1 to grid 2
    for i in range(gs1.size()):
        gp = gs1.getPoint(i)

        # insert grid point
        if not gs2.isContaining(gp):
            gs2.insert(gp)

    gs2.recalcLeafProperty()

    # return the joined grid
    return grid2
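
A short usage sketch for join, assuming the pysgpp API (extend_grid is expected to come from the same module as join):

from pysgpp import Grid

# a 2d and a 3d regular sparse grid
grid2d = Grid.createLinearGrid(2)
grid2d.getGenerator().regular(3)
grid3d = Grid.createLinearGrid(3)
grid3d.getGenerator().regular(2)

# grid2d is extended by a full grid resolution in the new direction,
# then its points are inserted into a copy of grid3d
joined = join(grid2d, grid3d)
print(joined.getDimension(), joined.getSize())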
Example #7
    def learnDataWithFolding(self, *args, **kws):
        # learn data
        for dtype, values in list(self.dataContainer.items()):
            knowledge = {}

            # do the learning
            for t, dataContainer in list(values.items()):
                if dataContainer is not None:
                    learner = self._learners[t]
                    # learn data, if there is any available
                    learner.grid = self.getGrid()
                    learner.dataContainer = dataContainer
                    alpha = learner.learnDataWithFolding(dtype=dtype)

                    # prepare the answer
                    knowledge[t] = copyGrid(learner.grid), DataVector(alpha)

            # update results
            if len(knowledge) > 0:
                self.updateResults(knowledge, dtype)
Example #8
    def learnDataWithFolding(self, *args, **kws):
        # learn data
        for dtype, values in self.dataContainer.items():
            knowledge = {}

            # do the learning
            for t, dataContainer in values.items():
                if dataContainer is not None:
                    learner = self._learners[t]
                    # learn data, if there is any available
                    learner.grid = self.getGrid()
                    learner.dataContainer = dataContainer
                    alpha = learner.learnDataWithFolding(dtype=dtype)

                    # prepare the answer
                    knowledge[t] = copyGrid(learner.grid), DataVector(alpha)

            # update results
            if len(knowledge) > 0:
                self.updateResults(knowledge, dtype)
Example #9
    def learnData(self, *args, **kws):
        # learn data
        for dtype, values in self.dataContainer.items():
            knowledge = {}
            print(KnowledgeTypes.toString(dtype))
            # do the learning
            for t, dataContainer in values.items():
                print "t = %g, " % t,
                if dataContainer is not None:
                    learner = self._learners[t]
                    # learn data, if there is any available
                    learner.grid = self.getGrid()
                    learner.dataContainer = dataContainer
                    alpha = learner.learnData()

                    # prepare the answer
                    knowledge[t] = copyGrid(learner.grid), DataVector(alpha)
            print()
            # update results
            if len(knowledge) > 0:
                self.updateResults(knowledge, dtype)
Example #10
def discretize(grid,
               alpha,
               f,
               epsilon=0.,
               refnums=0,
               pointsNum=10,
               level=0,
               deg=1,
               useDiscreteL2Error=True):
    """
    discretize f with a sparse grid

    @param grid: Grid
    @param alpha: surplus vector
    @param f: function
    @param epsilon: float, error tolerance
    @param refnums: int, number of refinment steps
    @param pointsNum: int, number of points to be refined per step
    @param level: int, initial grid level
    @param deg: int, degree of lagrange basis
    """
    # copy grid
    jgrid = copyGrid(grid, level=level, deg=deg)
    jgs = jgrid.getStorage()
    jgn = jgrid.getGenerator()
    basis_alpha = DataVector(alpha)

    # compute joined sg function
    jalpha = computeCoefficients(jgrid, grid, alpha, f)

    # compute errors
    maxdrift = None
    accMiseL2 = None
    l2error_grid = DataVector(alpha).l2Norm()
    if useDiscreteL2Error:
        maxdrift, accMiseL2 = computeErrors(jgrid, jalpha, grid, alpha, f)
    else:
        accMiseL2 = l2error_grid

#     print( "iteration 0/%i (%i, %i, %g): %g, %g, %s" % \ )
#         (refnums, jgs.getSize(), len(jalpha),
#          epsilon, accMiseL2, l2error_grid, maxdrift)

    ref = 0
    errs = [jgs.getSize(), accMiseL2, l2error_grid, maxdrift]
    bestGrid = copyGrid(jgrid)
    bestAlpha = DataVector(jalpha)
    bestL2Error = accMiseL2

    # repeat refinement as long as there are iterations and the
    # minimum error epsilon is reached
    jalpha = DataVector(jalpha)
    while ref < refnums and bestL2Error > epsilon:
        oldgrid = copyGrid(jgrid)
        # rp = max(1, min(pointsNum, jgn.getNumberOfRefinablePoints()))
        rp = jgn.getNumberOfRefinablePoints()
        jgn.refine(SurplusRefinementFunctor(jalpha, rp, epsilon))

        # stop if no new grid points were added by the refinement
        if len(basis_alpha) == jgs.getSize():
            break

        # extend alpha vector...
        basis_alpha.resizeZero(jgs.getSize())

        # ------------------------------
        # compute joined sg function
        jalpha = computeCoefficients(jgrid, grid, basis_alpha, f)
        # compute useDiscreteL2Error
        l2error_grid = estimateL2error(oldgrid, jgrid, jalpha)

        # do Monte Carlo integration for obtaining the accMiseL2
        if useDiscreteL2Error:
            maxdrift, accMiseL2 = computeErrors(jgrid, jalpha, grid, alpha, f)
        # ------------------------------
        print( "iteration %i/%i (%i, %i, %i, %i, %g): %g, %g, %s -> current best %g" % \
            (ref + 1, refnums,
             jgs.getSize(), len(jalpha),
             bestGrid.getSize(), len(bestAlpha),
             epsilon,
             accMiseL2, l2error_grid, maxdrift, bestL2Error))

        # check whether the new grid is better than the current best one
        # using the discrete l2 error. If no MC integration is done,
        # use the l2 error approximation via the sparse grid surpluses
        if (not useDiscreteL2Error and l2error_grid < bestL2Error) or \
                (useDiscreteL2Error and accMiseL2 < bestL2Error):
            bestGrid = copyGrid(jgrid)
            bestAlpha = DataVector(jalpha)
            if useDiscreteL2Error:
                bestL2Error = accMiseL2
            else:
                bestL2Error = l2error_grid
            errs = [jgs.getSize(), accMiseL2, l2error_grid, maxdrift]

        ref += 1

    return bestGrid, bestAlpha, errs
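
A usage sketch for discretize, assuming the pysgpp API and the hierarchize helper; the call signature f(x, value) is an assumption based on how computeCoefficients is invoked above:

import numpy as np
from pysgpp import Grid, DataVector

# coarse interpolant of exp(-x) on the unit interval
grid = Grid.createLinearGrid(1)
grid.getGenerator().regular(2)
gs = grid.getStorage()
nodalValues = DataVector(gs.getSize())
p = DataVector(gs.getDimension())
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), p)
    nodalValues[i] = np.exp(-p[0])
alpha = hierarchize(grid, nodalValues)

# discretize the square of the interpolant (assumed f(x, value))
f = lambda x, value: value ** 2
bestGrid, bestAlpha, errs = discretize(grid, alpha, f,
                                       epsilon=1e-4, refnums=5,
                                       useDiscreteL2Error=False)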
Example #11
    def __refine(self, learner, B, simulate=False):
        # get sparse grid
        grid = learner.getGrid()
        if simulate:
            oldGrid = grid
            grid = copyGrid(grid)
            learner.grid = grid

        # find how many points should be refined
        pointsNum = learner.getNumOfPointsToRefine(len(B))

        # refine now step by step
        newGridPoints = []
        refinedPoints = []
        gs = grid.getStorage()
        iteration = learner.iteration
        # size of grid before refinement
        n1 = gs.size()

        # as long as the end of learning has not been reached, continue...
        while pointsNum > 0 and len(B) > 0 and \
            (not learner.stopPolicy or
             not learner.stopPolicy.hasLimitReached(learner)):
            # note: the highest rated grid point is at the end of B
            vi, gp = B.pop()

            # some printing
            if not simulate:
                print "refine %i/%i (%i, %i) = %g" % \
                    (pointsNum, len(B), len(newGridPoints),
                     len(refinedPoints), vi)

            # refine the grid
            nps = self._localRefinementStrategy.refine(grid, gp)

            # ## set surplus vector such that just the desired point
            # ## is going to be refined and nothing else
            # oldgs = HashGridStorage(gs)
            # alpha = DataVector(gs.size())
            # alpha.setAll(0.0)
            # alpha[gs.seq(gp)] = 2.0
            # refFunc = SurplusRefinementFunctor(alpha, 1, 1)
            # ## TODO: try refineMaxLevel(refFunc, maxLevel)
            # grid.createGridGenerator().refine(refFunc)

            # nps = []
            # for i in xrange(gs.size()):
            #     if not oldgs.has_key(gs.get(i)):
            #         nps.append(i)

            # check there have been added some new points
            if not learner.stopPolicy or \
                    learner.stopPolicy.hasGridSizeChanged(learner):
                # if something has been refined then reduce the number
                # of points which should still be refined
                pointsNum -= 1

                # store which point has been refined
                refinedPoints.append(HashGridIndex(gp))
                newGridPoints += nps

                # increase iteration of the learner
                learner.iteration += 1

        # balance the grid
        if self._balancing:
            newGridPoints += balance(grid)

        # update admissible set
        if not simulate:
            self._admissibleSet.update(grid, newGridPoints)

        # make sure that I have collected all the new grid points
        assert len(newGridPoints) == gs.size() - n1

        # reset the iteration variable. @TODO: the iteration variable
        # is ambiguous: in the TrainingStopPolicy it counts refinement
        # steps, while in the ASGC context it counts refinements. Here
        # we ignore the former and use it just internally so that
        # hasLimitReached works.
        learner.iteration = iteration

#         if not simulate:
#             gs = grid.getStorage()
#             p = DataVector(gs.dim())
#
#             for gp in refinedPoints:
#                 gp.getCoords(p)
#                 plt.plot(p[0], p[1], marker='o', markersize=20,
#                          linestyle='', color='green')
#
#             for i in xrange(gs.size()):
#                 gs.get(i).getCoords(p)
#                 plt.plot(p[0], p[1], marker='o', markersize=10,
#                          linestyle='', color='blue')
#
#             for gp in newGridPoints:
#                 gp.getCoords(p)
#                 plt.plot(p[0], p[1], marker='o', markersize=10,
#                          linestyle='', color='red')
#
#             plt.title("size = %i" % gs.size())
#             plt.xlim(0, 1)
#             plt.ylim(0, 1)
#             plt.savefig('%i.png' % learner.iteration)

        # reset the learner if the refinement is just simulated
        if simulate:
            learner.grid = oldGrid

        return newGridPoints
Example #12
    def __refine(self, grid, B, simulate=False):
        # get sparse grid
        if simulate:
            grid = copyGrid(grid)

        # find how many points should be refined
        pointsNum = self.getNumOfPointsToRefine(len(B))

        # refine now step by step
        newGridPoints = []
        refinedPoints = []
        gs = grid.getStorage()

        # size of grid before refinement
        n1 = gs.getSize()

        # as long as the end of learning has not been reached, continue...
        while pointsNum > 0 and len(B) > 0:
            # note: the highest rated grid point is at the end of B
            vi, gp = B.pop()

            # some printing
            if not simulate and self.verbose:
                print( "refine %i/%i (%i, %i) = %g" % \
                    (pointsNum, len(B) + 1, len(newGridPoints),
                     len(refinedPoints), vi))

            # refine the grid
            oldSize = grid.getSize()
            nps = self._localRefinementStrategy.refine(grid, gp)
            assert grid.getSize() == oldSize + len(nps)

            # ## set surplus vector such that just the desired point
            # ## is going to be refined and nothing else
            # oldgs = HashGridStorage(gs)
            # alpha = DataVector(gs.getSize())
            # alpha.setAll(0.0)
            # alpha[gs.getSequenceNumber(gp)] = 2.0
            # refFunc = SurplusRefinementFunctor(alpha, 1, 1)
            # ## TODO: try refineMaxLevel(refFunc, maxLevel)
            # grid.getGenerator().refine(refFunc)

            # nps = []
            # for i in xrange(gs.getSize()):
            #     if not oldgs.isContaining(gs.getPoint(i)):
            #         nps.append(i)

            # check there have been added some new points
            if len(nps) > 0:
                # if something has been refined then reduce the number
                # of points which should still be refined
                pointsNum -= 1

                # store which point has been refined
                refinedPoints.append(HashGridPoint(gp))
                newGridPoints += nps

        # balance the grid
        if self._balancing:
            newGridPoints += balance(grid)

#         if not simulate:
#             import matplotlib.pyplot as plt
#             gs = grid.getStorage()
#             p = DataVector(gs.getDimension())
#
#             fig = plt.figure()
#             for gp in refinedPoints:
#                 gs.getCoordinates(gp, p)
#                 plt.plot(p[0], p[1], marker='o', markersize=20,
#                          linestyle='', color='green')
#
#             for i in xrange(gs.getSize()):
#                 gpi = gs.getPoint(i)
#                 gs.getCoordinates(gpi, p)
#                 if gpi in self._admissibleSet:
#                     plt.plot(p[0], p[1], marker='o', markersize=10,
#                              linestyle='', color='orange')
#                 else:
#                     plt.plot(p[0], p[1], marker='o', markersize=10,
#                              linestyle='', color='blue')
#
#             for gp in newGridPoints:
#                 gs.getCoordinates(gp, p)
#                 plt.plot(p[0], p[1], marker='o', markersize=10,
#                          linestyle='', color='red')
#
#             plt.title("size = %i" % gs.getSize())
#             plt.xlim(0, 1)
#             plt.ylim(0, 1)
# #             plt.show()
#             plt.savefig('/home/franzefn/Desktop/tmp/var_atan_i%i.png' % gs.getSize())
#             plt.close(fig)

        # update admissible set
        if not simulate:
            self._admissibleSet.update(grid, newGridPoints)

        # make sure that I have collected all the new grid points
        assert len(newGridPoints) == gs.getSize() - n1

        return newGridPoints