Example No. 1
    def testRefinement2d_two(self):
        from pysgpp import Grid, DataVector, SurplusRefinementFunctor
        factory = Grid.createLinearBoundaryGrid(2)
        storage = factory.getStorage()

        gen = factory.createGridGenerator()
        gen.regular(0)

        alpha = DataVector(4)

        for i in xrange(len(alpha)):
            alpha[i] = 0.0

        alpha[0] = 1.0
        func = SurplusRefinementFunctor(alpha)

        gen.refine(func)

        alpha2 = DataVector(8)

        for i in xrange(len(alpha2)):
            alpha2[i] = 0.0

        alpha2[4] = 1.0
        func = SurplusRefinementFunctor(alpha2)

        gen.refine(func)
        self.failUnlessEqual(storage.size(), 13)
Example No. 2
def refineGrid(grid, alpha, f, refnums):
    """
    This function refines a sparse grid function refnums times.

    Arguments:
    grid -- Grid sparse grid from pysgpp
    alpha -- DataVector coefficient vector
    f -- function to be interpolated
    refnums -- int number of refinement steps

    Returns nothing.
    """
    gs = grid.getStorage()
    gridGen = grid.getGenerator()
    x = DataVector(gs.getDimension())
    for _ in range(refnums):
        # refine a single grid point each time
        gridGen.refine(SurplusRefinementFunctor(alpha, 1))

        # extend alpha vector (new entries are set to zero)
        alpha.resizeZero(gs.getSize())

        # set function values in alpha
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), x)
            alpha[i] = f(x)

        # hierarchize
        createOperationHierarchisation(grid).doHierarchisation(alpha)
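
A minimal usage sketch for refineGrid follows; it only reuses calls already present in the function above (Grid.createLinearGrid, getGenerator, getStorage, createOperationHierarchisation), and the target function g is purely illustrative.

from pysgpp import Grid, DataVector, createOperationHierarchisation

# regular 2D grid of level 2
grid = Grid.createLinearGrid(2)
grid.getGenerator().regular(2)
gs = grid.getStorage()

# illustrative target function on [0, 1]^2
g = lambda x: 4.0 * x[0] * (1.0 - x[0]) * x[1]

# initial coefficients: nodal values of g, then hierarchize
alpha = DataVector(gs.getSize())
x = DataVector(gs.getDimension())
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), x)
    alpha[i] = g(x)
createOperationHierarchisation(grid).doHierarchisation(alpha)

# three surplus-based refinement steps, one grid point per step
refineGrid(grid, alpha, g, 3)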
Example No. 3
    def testFreeRefineTrapezoidBoundaries(self):
        """Tests surplus based refine for Hash-Storage"""
        from pysgpp import GridStorage, HashGenerator
        from pysgpp import SurplusRefinementFunctor, HashRefinementBoundaries, DataVector

        s = GridStorage(2)
        g = HashGenerator()

        g.regularWithBoundaries(s, 1, True)

        d = DataVector(9)
        d[0] = 0.0
        d[1] = 0.0
        d[2] = 0.0
        d[3] = 0.0
        d[4] = 0.0
        d[5] = 0.0
        d[6] = 0.0
        d[7] = 0.0
        d[8] = 1.0

        f = SurplusRefinementFunctor(d)
        r = HashRefinementBoundaries()

        r.free_refine(s, f)

        self.failUnlessEqual(s.size(), 21)
Example No. 4
    def refineGrid(self):
        self.notifyEventControllers(LearnerEvents.REFINING_GRID)
        pointsNum = self.specification.getNumOfPointsToRefine(
            self.grid.getGenerator().getNumberOfRefinablePoints())

        self.grid.getGenerator().refine(
            SurplusRefinementFunctor(self.alpha, pointsNum,
                                     self.specification.getAdaptThreshold()))
Example No. 5
 def refineGrid(self):
     """
     Refines the grid with the number of points specified in the
     corresponding TrainingSpecification object.
     """
     self.notifyEventControllers(LearnerEvents.REFINING_GRID)
     refinableNum = self.grid.getGenerator().getNumberOfRefinablePoints()
     pointsNum = self.getNumOfPointsToRefine(refinableNum)
     functor = SurplusRefinementFunctor(self.errors, pointsNum,
                                        self.getAdaptThreshold())
     self.grid.getGenerator().refine(functor)
Example No. 6
    def testSurplusFunctor(self):
        """Tests if surplus functor correctly considers absolute values"""
        from pysgpp import GridStorage
        from pysgpp import SurplusRefinementFunctor, DataVector

        s = GridStorage(2)
        d = DataVector(1)
        f = SurplusRefinementFunctor(d)

        d[0] = -10.0
        self.failUnless(f(s, 0) > f.start())

        d[0] = 10.0
        self.failUnless(f(s, 0) > f.start())
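
The assertions above only hold because the functor ranks grid points by the absolute value of the corresponding surplus. As a rough pure-Python illustration of that selection criterion (a sketch, not the pysgpp implementation), the points a SurplusRefinementFunctor(alpha, k, threshold) favours could be mimicked like this:

# illustration only: keep the k indices with the largest |surplus|
# whose value still exceeds the threshold
def top_k_by_abs_surplus(alpha, k, threshold=0.0):
    scored = sorted(((abs(alpha[i]), i) for i in range(len(alpha))), reverse=True)
    return [i for value, i in scored[:k] if value > threshold]

# both -10.0 and 10.0 yield the same refinement candidate
print(top_k_by_abs_surplus([-10.0], 1))   # [0]
print(top_k_by_abs_surplus([10.0], 1))    # [0]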
Example No. 7
    def testRefinement(self):
        from pysgpp import Grid, DataVector, SurplusRefinementFunctor
        factory = Grid.createLinearGrid(2)
        storage = factory.getStorage()

        gen = factory.createGridGenerator()
        gen.regular(1)

        self.failUnlessEqual(storage.size(), 1)
        alpha = DataVector(1)
        alpha[0] = 1.0
        func = SurplusRefinementFunctor(alpha)

        gen.refine(func)
        self.failUnlessEqual(storage.size(), 5)
Example No. 8
 def test_InconsistentRefinement1Point(self):
     """Dimensionally adaptive refinement using surplus coefficients as local
     error indicator and inconsistent hash refinement.
     
     """
     # point ((3,7), (1,1)) (middle most right) gets larger surplus coefficient
     alpha = DataVector(self.grid.getSize())
     alpha.setAll(1.0)
     alpha[12] = 2.0
     
     functor = SurplusRefinementFunctor(alpha, 1, 0.0)
     refinement = HashRefinementInconsistent()
     refinement.free_refine(self.HashGridStorage, functor)
     
     self.assertEqual(self.grid.getSize(), 17)
Example No. 9
    def test_ANOVA_Refinement_Surplus(self):
        """Dimensionally adaptive refinement using surplus coefficients as local
        error indicator
        
        """

        # point ((3,7), (1,1)) (middle most right) gets larger surplus coefficient
        alpha = DataVector(self.grid.getSize())
        point_to_refine = None
        for i in range(17):
            point = self.grid_storage.getPoint(i)
            if point.getLevel(0) == 3 and point.getIndex(0) == 7 \
                and point.getLevel(1) == 1 and point.getIndex(1) == 1:
                point_to_refine = point
                alpha[i] = 2.0
            else:
                alpha[i] = 1.0

        # refine one point
        functor = SurplusRefinementFunctor(alpha, 1, 0.0)
        anova_refinement = ANOVAHashRefinement()
        #refinement_strategy = ANOVARefinement(hash_refinement)
        anova_refinement.free_refine(self.grid_storage, functor)

        # check if only the children along x1 direction were inserted
        self.assertEqual(
            self.grid.getSize(), 19,
            'Number of grid points doesn\'t match: %d != %d' %
            (self.grid.getSize(), 19))
        child = point_to_refine.__class__(point_to_refine)
        child.getLeftChild(0)
        self.assertTrue(self.grid_storage.isContaining(child),
                        'Left x1 left child was not found')
        child = point_to_refine.__class__(point_to_refine)
        child.getRightChild(0)
        self.assertTrue(self.grid_storage.isContaining(child),
                        'Left x1 right child was not found')
        child = point_to_refine.__class__(point_to_refine)
        child.getLeftChild(1)
        self.assertFalse(
            self.grid_storage.isContaining(child),
            'Left x2 left child is present, though should not be')
        child = point_to_refine.__class__(point_to_refine)
        child.getRightChild(1)
        self.assertFalse(
            self.grid_storage.isContaining(child),
            'Left x2 right child is present, though should not be')
Example No. 10
    def testRefinement3d(self):
        from pysgpp import Grid, DataVector, SurplusRefinementFunctor
        factory = Grid.createLinearTrapezoidBoundaryGrid(3)
        storage = factory.getStorage()

        gen = factory.createGridGenerator()
        gen.regular(1)

        self.failUnlessEqual(storage.size(), 27)
        alpha = DataVector(27)
        for i in xrange(len(alpha)):
            alpha[i] = 0.0

        alpha[26] = 1.0
        func = SurplusRefinementFunctor(alpha)

        gen.refine(func)
        self.failUnlessEqual(storage.size(), 81)
Example No. 11
    def testFreeRefine(self):
        """Tests surplus based refine for Hash-Storage"""
        from pysgpp import GridStorage, HashGenerator
        from pysgpp import SurplusRefinementFunctor, HashRefinement, DataVector

        s = GridStorage(2)
        g = HashGenerator()

        g.regular(s, 1)

        d = DataVector(1)
        d[0] = 1.0

        f = SurplusRefinementFunctor(d)
        r = HashRefinement()

        r.free_refine(s, f)

        self.failUnlessEqual(s.size(), 5)
Example No. 12
    def test_freeRefineSubspaceIsotropic(self):
        """Refine the isotropic middle subspace"""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        for i in [13, 14, 15, 16]:
            alpha[i] = 2.
        # refinement stuff
        refinement = HashRefinement()
        decorator = SubspaceRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.free_refine(self.HashGridStorage, functor)

        self.assertEqual(self.grid.getSize(), 33)

        for i in range(self.grid.getSize()):
            HashGridPoint = self.HashGridStorage.getPoint(i)
            levelIndex = eval(HashGridPoint.toString())
            self.assertFalse(levelIndex[0] == 4 or levelIndex[2] == 4)
Example No. 13
    def test_freeRefineSubspaceIsotropic(self):
        """Refine the isotropic middle subspace"""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        alpha[12] = 2.
        # refinement stuff
        refinement = HashRefinement()
        decorator = GSGRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.freeRefineSubspace(self.HashGridStorage, functor)
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        self.assertEqual(self.grid.getSize(), 15)

        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4 or levelIndex[2] >= 3)
Example No. 14
    def testPrewaveletAdaptivedD_two(self):
        from pysgpp import Grid, DataVector, SurplusRefinementFunctor

        factory = Grid.createPrewaveletGrid(4)
        training = buildTrainingVector(
            readDataVector('data/data_dim_4_nops_4096_float.arff.gz'))
        level = 2
        gen = factory.createGridGenerator()
        gen.regular(level)

        alpha = DataVector(factory.getStorage().size())
        for i in xrange(factory.getStorage().size()):
            alpha[i] = i + 1
        gen.refine(SurplusRefinementFunctor(alpha, 1))

        m = generateBBTMatrix(factory, training)
        m_ref = readReferenceMatrix(
            self, factory.getStorage(),
            'data/BBT_prewavelet_dim_4_nopsgrid_17_adapt_float.dat.gz')

        # compare
        compareBBTMatrices(self, m, m_ref)
Example No. 15
    def test_freeRefineSubspaceAnisotropic(self):
        """Refine Anisotropic subspace (x2)"""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        for i in [9, 10, 11, 12]:
            alpha[i] = 2.
        # refinement stuff
        refinement = HashRefinement()
        decorator = SubspaceRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.free_refine(self.HashGridStorage, functor)
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        self.assertEqual(self.grid.getSize(), 33)

        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4)
Example No. 16
    def test_Spatial_Refinement_Surplus(self):
        """Spatial refinement using surplus coefficients as local error
        indicator
        
        """

        # point ((2,1), (2,3)) (top right) gets larger surplus coefficient
        alpha = DataVector(self.grid.getSize())
        point_to_refine = None
        for i in xrange(17):
            point = self.grid_storage.get(i)
            if point.getLevel(0) == 2 and point.getIndex(0) == 1 \
                and point.getLevel(1) == 2 and point.getIndex(1) == 3:
                point_to_refine = point
                alpha[i] = 2.0
            else:
                alpha[i] = 1.0
        # refine one point
        self.grid.createGridGenerator().refine(
            SurplusRefinementFunctor(alpha, 1, 0))
        # check that all children were inserted
        self.assertEqual(self.grid.getSize(), 21,
                         'Number of grid points doesn\'t match')
        child = point_to_refine.__class__(point_to_refine)
        self.grid_storage.left_child(child, 0)
        self.assertTrue(self.grid_storage.has_key(child),
                        'Left x1 left child was not found')
        child = point_to_refine.__class__(point_to_refine)
        self.grid_storage.right_child(child, 0)
        self.assertTrue(self.grid_storage.has_key(child),
                        'Left x1 right child was not found')
        child = point_to_refine.__class__(point_to_refine)
        self.grid_storage.left_child(child, 1)
        self.assertTrue(self.grid_storage.has_key(child),
                        'Left x2 left child was not found')
        child = point_to_refine.__class__(point_to_refine)
        self.grid_storage.right_child(child, 1)
        self.assertTrue(self.grid_storage.has_key(child),
                        'Left x2 right child was not found')
Example No. 17
    def testFreeRefineTrapezoidBoundaries(self):
        """Tests surplus based refine for Hash-Storage"""
        from pysgpp import GridStorage, HashGenerator
        from pysgpp import SurplusRefinementFunctor, HashRefinementBoundaries, DataVector

        s = GridStorage(2)
        g = HashGenerator()

        g.regularWithBoundaries(s, 2, False)

        d = DataVector(17)

        for i in xrange(len(d)):
            d[i] = 0.0

        d[12] = 1.0

        f = SurplusRefinementFunctor(d)
        r = HashRefinementBoundaries()

        r.free_refine(s, f)

        self.failUnlessEqual(s.size(), 21)
Example No. 18
    def testRefinement2d(self):
        from pysgpp import Grid, DataVector, SurplusRefinementFunctor
        factory = Grid.createLinearTrapezoidBoundaryGrid(2)
        storage = factory.getStorage()

        gen = factory.createGridGenerator()
        gen.regular(1)

        self.failUnlessEqual(storage.size(), 9)
        alpha = DataVector(9)
        alpha[0] = 0.0
        alpha[1] = 0.0
        alpha[2] = 0.0
        alpha[3] = 0.0
        alpha[4] = 0.0
        alpha[5] = 0.0
        alpha[6] = 0.0
        alpha[7] = 0.0
        alpha[8] = 1.0
        func = SurplusRefinementFunctor(alpha)

        gen.refine(func)
        self.failUnlessEqual(storage.size(), 21)
Example No. 19
print "dimensionality:                   {}".format(dim)

# create regular grid, level 3
level = 3
gridGen = grid.createGridGenerator()
gridGen.regular(level)
print "number of initial grid points:    {}".format(gridStorage.size())

# definition of function to interpolate - nonsymmetric(!)
f = lambda x0, x1: 16.0 * (x0 - 1) * x0 * (x1 - 1) * x1 * x1
# create coefficient vector
alpha = DataVector(gridStorage.size())
print "length of alpha vector:           {}".format(alpha.getSize())

# now refine adaptively 5 times
for refnum in range(5):
    # set function values in alpha
    for i in xrange(gridStorage.size()):
        gp = gridStorage.get(i)
        alpha[i] = f(gp.getCoord(0), gp.getCoord(1))

    # hierarchize
    createOperationHierarchisation(grid).doHierarchisation(alpha)

    # refine a single grid point each time
    gridGen.refine(SurplusRefinementFunctor(alpha, 1))
    print "refinement step {}, new grid size: {}".format(
        refnum + 1, gridStorage.size())

    # extend alpha vector (new entries uninitialized)
    alpha.resize(gridStorage.size())
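
One caveat with the loop above: after the final resize the newly added entries of alpha are uninitialized, so before the coefficients are used one more value-setting and hierarchization pass is needed. A sketch, continuing the script above with the same variables and calls:

# fill the entries added by the last refinement step, then hierarchize again
for i in xrange(gridStorage.size()):
    gp = gridStorage.get(i)
    alpha[i] = f(gp.getCoord(0), gp.getCoord(1))
createOperationHierarchisation(grid).doHierarchisation(alpha)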
Example No. 20
def discretize(grid,
               alpha,
               f,
               epsilon=0.,
               refnums=0,
               pointsNum=10,
               level=0,
               deg=1,
               useDiscreteL2Error=True):
    """
    discretize f with a sparse grid

    @param grid: Grid
    @param alpha: surplus vector
    @param f: function
    @param epsilon: float, error tolerance
    @param refnums: int, number of refinement steps
    @param pointsNum: int, number of points to be refined per step
    @param level: int, initial grid level
    @param deg: int, degree of the Lagrange basis
    @param useDiscreteL2Error: bool, if True, estimate the error via Monte
                               Carlo integration (computeErrors); otherwise
                               use the surplus-based L2 approximation
    @return: tuple (best grid, best surplus vector, error statistics
             [grid size, accMiseL2, l2error_grid, maxdrift])
    """
    # copy grid
    jgrid = copyGrid(grid, level=level, deg=deg)
    jgs = jgrid.getStorage()
    jgn = jgrid.createGridGenerator()
    basis_alpha = DataVector(alpha)

    # compute joined sg function
    jalpha = computeCoefficients(jgrid, grid, alpha, f)

    # compute errors
    maxdrift = None
    accMiseL2 = None
    l2error_grid = alpha.l2Norm()
    if useDiscreteL2Error:
        maxdrift, accMiseL2 = computeErrors(jgrid, jalpha, grid, alpha, f)
    else:
        accMiseL2 = l2error_grid

#     print "iteration 0/%i (%i, %i, %g): %g, %g, %s" % \
#         (refnums, jgs.size(), len(jalpha),
#          epsilon, accMiseL2, l2error_grid, maxdrift)

    ref = 0
    errs = [jgs.size(), accMiseL2, l2error_grid, maxdrift]
    bestGrid = copyGrid(jgrid)
    bestAlpha = DataVector(jalpha)
    bestL2Error = accMiseL2

    # repeat refinement as long as refinement steps remain and the best
    # error is still above the tolerance epsilon
    while ref < refnums and bestL2Error > epsilon:
        oldgrid = copyGrid(jgrid)
        rp = jgn.getNumberOfRefinablePoints()  # max(1, min(pointsNum, jgn.getNumberOfRefinablePoints()))
        jgn.refine(SurplusRefinementFunctor(jalpha, rp, epsilon))

        # stop if no new grid point was added in the last refinement step
        if len(basis_alpha) == jgs.size():
            break

        # extend alpha vector...
        basis_alpha.resizeZero(jgs.size())

        # ------------------------------
        # compute joined sg function
        jalpha = computeCoefficients(jgrid, grid, basis_alpha, f)
        # estimate the L2 error between the old and the refined grid function
        l2error_grid = estimateL2error(oldgrid, jgrid, jalpha)

        # do Monte Carlo integration for obtaining the accMiseL2
        if useDiscreteL2Error:
            maxdrift, accMiseL2 = computeErrors(jgrid, jalpha, grid, alpha, f)
        # ------------------------------
        print "iteration %i/%i (%i, %i, %i, %i, %g): %g, %g, %s -> current best %g" % \
            (ref + 1, refnums,
             jgs.size(), len(jalpha),
             bestGrid.getSize(), len(bestAlpha),
             epsilon,
             accMiseL2, l2error_grid, maxdrift, bestL2Error)

        # check whether the new grid is better than the current best one
        # using the discrete l2 error. If no MC integration is done,
        # use the l2 error approximation via the sparse grid surpluses
        if (not useDiscreteL2Error and l2error_grid < bestL2Error) or \
                (useDiscreteL2Error and accMiseL2 < bestL2Error):
            bestGrid = copyGrid(jgrid)
            bestAlpha = DataVector(jalpha)
            if useDiscreteL2Error:
                bestL2Error = accMiseL2
            else:
                bestL2Error = l2error_grid
            errs = [jgs.size(), accMiseL2, l2error_grid, maxdrift]

        ref += 1

    return bestGrid, bestAlpha, errs
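
A hedged usage sketch for discretize: it assumes grid and alpha already describe a sparse grid function and f is the function to approximate; copyGrid, computeCoefficients, estimateL2error and computeErrors are the helpers from the same module that discretize calls internally.

# refine at most 3 times, or until the estimated L2 error drops below 1e-4;
# useDiscreteL2Error=False skips the Monte Carlo estimate and relies on the
# surplus-based approximation instead
bestGrid, bestAlpha, errs = discretize(grid, alpha, f,
                                       epsilon=1e-4,
                                       refnums=3,
                                       deg=1,
                                       useDiscreteL2Error=False)
gridSize, accMiseL2, l2errorGrid, maxdrift = errs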