Example #1
    def test_interpolation_wodup(self):
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))

        f = Function(name='f', grid=grid, space_order=0)
        f.data[:] = 4.
        if grid.distributor.myrank == 0:
            coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
        else:
            coords = []
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = 0.

        # This is the situation at this point
        # O is a grid point
        # * is a sparse point
        #
        # O --- O --- O --- O
        # |  *  |     |  *  |
        # O --- O --- O --- O
        # |     |     |     |
        # O --- O --- O --- O
        # |  *  |     |  *  |
        # O --- O --- O --- O

        op = Operator(sf.interpolate(expr=f))
        op.apply()

        assert np.all(sf.data == 4.)
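The asserted value can be checked by hand: with extent (3.0, 3.0) on a 4x4 grid the spacing is 1.0, so every sparse point sits at the centre of a cell and bilinear interpolation gives each of the four surrounding grid nodes a weight of 0.25. A minimal NumPy sketch of that arithmetic:

import numpy as np

weights = np.full(4, 0.25)      # bilinear weights at a cell centre
node_values = np.full(4, 4.0)   # f is uniformly 4
assert np.isclose(weights @ node_values, 4.0)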
Example #2
    def test_override_composite_data(self):
        i, j = dimify('i j')
        grid = Grid(shape=(10, 10), dimensions=(i, j))
        original_coords = (1., 1.)
        new_coords = (2., 2.)
        p_dim = Dimension('p_src')
        u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
        src1 = SparseFunction(name='src1',
                              grid=grid,
                              dimensions=[time, p_dim],
                              npoint=1,
                              nt=10,
                              coordinates=original_coords)
        src2 = SparseFunction(name='src1',
                              grid=grid,
                              dimensions=[time, p_dim],
                              npoint=1,
                              nt=10,
                              coordinates=new_coords)
        op = Operator(src1.inject(u, src1))

        # Move the source from the location where the setup put it so we can test
        # whether the override picks up the original coordinates or the changed ones

        # Operator.arguments() returns a tuple of (data, dimension_sizes)
        args = op.arguments(src1=src2)[0]
        arg_name = src1.name + "_coords"
        assert (np.array_equal(args[arg_name], np.asarray((new_coords, ))))
Example #3
    def test_default_composite_functions(self):
        """
        Test the default argument derivation for composite functions.
        """
        grid = Grid(shape=(5, 6, 7))
        f = TimeFunction(name='f', grid=grid)
        s = SparseFunction(name='s', grid=grid, npoint=3, nt=4)
        s.coordinates.data[:, 0] = np.arange(0., 3.)
        s.coordinates.data[:, 1] = np.arange(1., 4.)
        s.coordinates.data[:, 2] = np.arange(2., 5.)
        op = Operator(s.interpolate(f))

        expected = {
            's': s.data,
            's_coords': s.coordinates.data,
            # Default dimensions of the sparse data
            'p_size': 3,
            'p_s': 0,
            'p_e': 3,
            'd_size': 3,
            'd_s': 0,
            'd_e': 3,
            'time_size': 4,
            'time_s': 0,
            'time_e': 4,
        }
        self.verify_arguments(op.arguments(), expected)
Example #4
    def test_injection_wodup(self):
        """
        Test injection operator when the sparse points don't need to be replicated
        ("wodup" -> w/o duplication) over multiple MPI ranks.
        """
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))

        f = Function(name='f', grid=grid, space_order=0)
        f.data[:] = 0.
        if grid.distributor.myrank == 0:
            coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
        else:
            coords = []
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = 4.

        # This is the situation at this point
        # O is a grid point
        # * is a sparse point
        #
        # O --- O --- O --- O
        # |  *  |     |  *  |
        # O --- O --- O --- O
        # |     |     |     |
        # O --- O --- O --- O
        # |  *  |     |  *  |
        # O --- O --- O --- O

        op = Operator(sf.inject(field=f, expr=sf + 1))
        op.apply()

        assert np.all(f.data == 1.25)
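Why 1.25: each sparse point injects expr = sf + 1 = 5.0, scattered with weight 0.25 onto the four nodes of its cell, and the four cells together touch each of the 16 grid nodes exactly once. A short NumPy sketch reproducing the expected field (the spacing is 1.0, so int() gives the cell index):

import numpy as np

f_expected = np.zeros((4, 4))
for px, py in [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]:
    i, j = int(px), int(py)
    f_expected[i:i+2, j:j+2] += 0.25 * 5.0   # sf + 1 = 5.0 split over 4 nodes
assert np.all(f_expected == 1.25)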
Example #5
    def test_injection_dup(self):
        """
        Test injection operator when the sparse points are replicated over
        multiple MPI ranks.
        """
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        x, y = grid.dimensions

        f = Function(name='f', grid=grid)
        f.data[:] = 0.
        if grid.distributor.myrank == 0:
            coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
        else:
            coords = []
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = 4.

        # Global view (left) and local view (right, after domain decomposition)
        # O is a grid point
        # x is a halo point
        # A, B, C, D are sparse points
        #                               Rank0           Rank1
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |  A  |     |     |           |  A  |     |   |     |     |
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |     |  C  |  B  |     -->   |     |  C  |   |  C  |  B  |
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        # |     |  D  |     |           Rank2           Rank3
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        #                               |     |  C  |   |  C  |  B  |
        #                               O --- O --- x   x --- O --- O
        #                               |     |  D  |   |  D  |     |
        #                               O --- O --- x   x --- O --- O
        #
        # Expected `f.data` (global view)
        #
        # 1.25 --- 1.25 --- 0.00 --- 0.00
        #  |        |        |        |
        # 1.25 --- 2.50 --- 2.50 --- 1.25
        #  |        |        |        |
        # 0.00 --- 2.50 --- 3.75 --- 1.25
        #  |        |        |        |
        # 0.00 --- 1.25 --- 1.25 --- 0.00

        op = Operator(sf.inject(field=f, expr=sf + 1))
        op.apply()

        glb_pos_map = grid.distributor.glb_pos_map
        if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:  # rank0
            assert np.all(f.data_ro_domain == [[1.25, 1.25], [1.25, 2.5]])
        elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:  # rank1
            assert np.all(f.data_ro_domain == [[0., 0.], [2.5, 1.25]])
        elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
            assert np.all(f.data_ro_domain == [[0., 2.5], [0., 1.25]])
        elif RIGHT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
            assert np.all(f.data_ro_domain == [[3.75, 1.25], [1.25, 0.]])
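The commented global view follows from the same weights as in the previous injection test: every point deposits 5.0 * 0.25 = 1.25 on each node of its cell, so a node's final value is 1.25 times the number of sparse points whose cell touches it (node (2, 2) is touched by B, C and D, hence 3.75). A quick NumPy reproduction:

import numpy as np

f_expected = np.zeros((4, 4))
for px, py in [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]:   # A, B, C, D
    i, j = int(px), int(py)
    f_expected[i:i+2, j:j+2] += 1.25
assert f_expected[2, 2] == 3.75   # matches the expected global view above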
Example #6
    def test_ownership(self, coords, expected):
        """Given a sparse point ``p`` with known coordinates, this test checks
        that the MPI rank owning ``p`` is retrieved correctly."""
        grid = Grid(shape=(4, 4), extent=(4.0, 4.0))

        sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coords)

        assert len(sf.gridpoints) == len(expected)
        assert all(
            sf._is_owned(i) == (j == grid.distributor.myrank)
            for i, j in zip(sf.gridpoints, expected))
Example #7
    def test_interpolation_dup(self):
        """
        Test interpolation operator when the sparse points are replicated over
        multiple MPI ranks.
        """
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        x, y = grid.dimensions

        # Init Function+data
        f = Function(name='f', grid=grid)
        f.data[:] = np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3],
                              [4, 4, 4, 4]])
        coords = np.array([(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)])
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = 0.

        # Global view (left) and local view (right, after domain decomposition)
        # O is a grid point
        # x is a halo point
        # A, B, C, D are sparse points
        #                               Rank0           Rank1
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |  A  |     |     |           |  A  |     |   |     |     |
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |     |  C  |  B  |     -->   |     |  C  |   |  C  |  B  |
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        # |     |  D  |     |           Rank2           Rank3
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        #                               |     |  C  |   |  C  |  B  |
        #                               O --- O --- x   x --- O --- O
        #                               |     |  D  |   |  D  |     |
        #                               O --- O --- x   x --- O --- O
        #
        # The initial `f.data` is (global view)
        #
        # 1. --- 1. --- 1. --- 1.
        # |      |      |      |
        # 2. --- 2. --- 2. --- 2.
        # |      |      |      |
        # 3. --- 3. --- 3. --- 3.
        # |      |      |      |
        # 4. --- 4. --- 4. --- 4.
        #
        # Expected `sf.data` (global view)
        #
        # 1.5 --- 2.5 --- 2.5 --- 3.5

        op = Operator(sf.interpolate(expr=f))
        op.apply()

        assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5][grid.distributor.myrank])
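At a cell centre, bilinear interpolation reduces to the average of the four surrounding node values, which yields the commented global view; each rank then holds the value of the one point it owns. The expected values, computed directly in NumPy:

import numpy as np

f = np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], dtype=float)
points = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]   # A, B, C, D
vals = [f[int(px):int(px) + 2, int(py):int(py) + 2].mean() for px, py in points]
assert np.allclose(vals, [1.5, 2.5, 2.5, 3.5])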
Example #8
    def test_no_index_sparse(self):
        """Test behaviour when the ConditionalDimension is used as a symbol in
        an expression over sparse data objects."""
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid, save=1)
        f.data[:] = 0.

        coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=4,
                            coordinates=coordinates)
        sf.data[:] = 1.
        sd = sf.dimensions[sf._sparse_position]

        # We want to write to `f` through `sf` so that we obtain the
        # following 4x4 grid (the '*' show the position of the sparse points)
        # We do that by emulating an injection
        #
        # 0 --- 0 --- 0 --- 0
        # |  *  |     |  *  |
        # 0 --- 1 --- 1 --- 0
        # |     |     |     |
        # 0 --- 1 --- 1 --- 0
        # |  *  |     |  *  |
        # 0 --- 0 --- 0 --- 0

        radius = 1
        indices = [(i, i + radius) for i in sf._coordinate_indices]
        bounds = [i.symbolic_size - radius for i in grid.dimensions]

        eqs = []
        for e, i in enumerate(product(*indices)):
            args = [j > 0 for j in i]
            args.extend([j < k for j, k in zip(i, bounds)])
            condition = And(*args, evaluate=False)
            cd = ConditionalDimension('sfc%d' % e,
                                      parent=sd,
                                      condition=condition)
            index = [time] + list(i)
            eqs.append(Eq(f[index], f[index] + sf[cd]))

        op = Operator(eqs)
        op.apply(time=0)

        assert np.all(f.data[0, 1:-1, 1:-1] == 1.)
        assert np.all(f.data[0, 0] == 0.)
        assert np.all(f.data[0, -1] == 0.)
        assert np.all(f.data[0, :, 0] == 0.)
        assert np.all(f.data[0, :, -1] == 0.)
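The guard conditions reduce the emulated injection to a single interior node per sparse point, which is where the 2x2 block of ones in the comment comes from. A plain-NumPy reproduction of that logic (the coordinate indices (0, 0), (0, 2), (2, 0), (2, 2) follow from the spacing of 1.0):

from itertools import product

import numpy as np

f_expected = np.zeros((4, 4))
for ix, iy in [(0, 0), (0, 2), (2, 0), (2, 2)]:        # per-point coordinate indices
    for i, j in product((ix, ix + 1), (iy, iy + 1)):   # the 4 candidate nodes per point
        if 0 < i < 3 and 0 < j < 3:                    # the ConditionalDimension guard
            f_expected[i, j] += 1.0
assert np.all(f_expected[1:-1, 1:-1] == 1.0)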
Example #9
    def test_local_indices(self, coords, expected):
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))

        data = np.array([0., 1., 2., 3.])
        coords = np.array(coords)
        sf = SparseFunction(name='sf', grid=grid, npoint=len(coords))

        # Each of the 4 MPI ranks gets one (randomly chosen) sparse point
        assert sf.npoint == 1

        sf.coordinates.data[:] = coords
        sf.data[:] = data

        expected = np.array(expected[grid.distributor.myrank])
        assert np.all(sf.data == expected)
Example #10
def points(grid, ranges, npoints, name='points'):
    """Create a set of sparse points from a set of coordinate
    ranges for each spatial dimension.
    """
    points = SparseFunction(name=name, grid=grid, npoint=npoints)
    for i, r in enumerate(ranges):
        points.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
    return points
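A minimal usage sketch for the helper above (the grid size and ranges are illustrative and assume the same devito imports as the surrounding examples):

grid = Grid(shape=(11, 11), extent=(1.0, 1.0))
recv = points(grid, ranges=[(0.2, 0.8), (0.2, 0.8)], npoints=5, name='recv')
# recv.coordinates.data is a (5, 2) array: 5 points evenly spaced along the
# diagonal from (0.2, 0.2) to (0.8, 0.8)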
Example #11
    def test_adjoint_inject_interpolate(self, shape, coords, npoints=19):
        """
        Verify that ``p.inject`` is the adjoint of ``p.interpolate`` for a
        Devito SparseFunction ``p``.
        """
        grid = Grid(shape)
        a = Function(name="a", grid=grid)
        a.data[:] = 0.
        c = Function(name='c', grid=grid)
        c.data[:] = 27.

        assert a.grid == c.grid
        # Inject receiver
        p = SparseFunction(name="p", grid=grid, npoint=npoints)
        for i, r in enumerate(coords):
            p.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
        p.data[:] = 1.2
        expr = p.inject(field=a, expr=p)
        # Read receiver
        p2 = SparseFunction(name="p2", grid=grid, npoint=npoints)
        for i, r in enumerate(coords):
            p2.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
        expr2 = p2.interpolate(expr=c)
        Operator(expr + expr2)(a=a, c=c)
        # < P x, y > - < x, P^T y>
        # Px => p2
        # y => p
        # x => c
        # P^T y => a
        term1 = np.dot(p2.data.reshape(-1), p.data.reshape(-1))
        term2 = np.dot(c.data.reshape(-1), a.data.reshape(-1))
        assert np.isclose((term1-term2) / term1, 0., atol=1.e-6)
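The dot test balances exactly here because both fields are constant: interpolating c = 27 returns 27 at each of the 19 receivers, and injecting 1.2 per point deposits a total of 1.2 into `a` for each point (the bilinear weights of a point inside the domain sum to 1). A sketch of that bookkeeping, assuming all 19 points fall strictly inside the domain:

npoints, y_val, c_val = 19, 1.2, 27.0
lhs = npoints * c_val * y_val         # <P x, y>: p2 . p
rhs = c_val * (npoints * y_val)       # <x, P^T y>: c . a
assert abs(lhs - rhs) < 1e-12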
Example #12
    def test_scheduling_sparse_functions(self):
        """Tests loop scheduling in presence of sparse functions."""
        grid = Grid((10, 10))
        time = grid.time_dim

        u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
        u2 = TimeFunction(name="u2", grid=grid, time_order=2)
        sf1 = SparseFunction(name='sf1', grid=grid, npoint=1, ntime=10)
        sf2 = SparseFunction(name='sf2', grid=grid, npoint=1, ntime=10)

        # Deliberately inject into u1, rather than u1.forward, to create a WAR w/ eqn3
        eqn1 = Eq(u1.forward, u1 + 2.0 - u1.backward)
        eqn2 = sf1.inject(u1, expr=sf1)
        eqn3 = Eq(u2.forward, u2 + 2*u2.backward - u1.dt2)
        eqn4 = sf2.interpolate(u2)

        op = Operator([eqn1] + eqn2 + [eqn3] + eqn4)
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 4
        # Time loop not shared due to the WAR
        assert trees[0][0].dim is time and trees[0][0] is trees[1][0]  # this IS shared
        assert trees[1][0] is not trees[2][0]
        assert trees[2][0].dim is time and trees[2][0] is trees[3][0]  # this IS shared

        # Now single, shared time loop expected
        eqn2 = sf1.inject(u1.forward, expr=sf1)
        op = Operator([eqn1] + eqn2 + [eqn3] + eqn4)
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 4
        assert all(trees[0][0] is i[0] for i in trees)
Example #13
def test_edge_sparse():
    """
    Test that interpolation picks the correct grid point in the edge case
    where the sparse point sits at the origin and the grid spacing is
    non-rational. Without factorization of the expressions, round-off error
    would make the interpolation read the halo point instead of point (0, 0).
    """
    grid = Grid(shape=(16, 16), extent=(225., 225.), origin=(25., 35.))
    u = unit_box(shape=(16, 16), grid=grid)
    u._data_with_outhalo[:u.space_order, :] = -1
    u._data_with_outhalo[:, :u.space_order] = -1
    sf1 = SparseFunction(name='s', grid=u.grid, npoint=1)
    sf1.coordinates.data[0, :] = (25.0, 35.0)

    expr = sf1.interpolate(u)
    subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing)}
    op = Operator(expr, subs=subs)
    op()
    assert sf1.data[0] == 0
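The coordinate-to-index arithmetic behind this test: the spacing is 225. / 15 = 15., so the sparse point (25., 35.) maps exactly onto grid node (0, 0). A tiny sketch of the failure mode being guarded against (the small negative number is hypothetical, only to show how round-off would floor to the halo index -1):

import numpy as np

spacing, origin, coord = 15.0, (25.0, 35.0), (25.0, 35.0)
idx = [int(np.floor((c - o) / spacing)) for c, o in zip(coord, origin)]
assert idx == [0, 0]                             # the intended grid point
assert int(np.floor(-1e-16 / spacing)) == -1     # what round-off could produce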
Example #14
def test_sparse_function():
    grid = Grid(shape=(3,))
    sf = SparseFunction(name='sf', grid=grid, npoint=3, space_order=2,
                        coordinates=[(0.,), (1.,), (2.,)])
    sf.data[0] = 1.

    pkl_sf = pickle.dumps(sf)
    new_sf = pickle.loads(pkl_sf)

    # .data is initialized, so it should have been pickled too
    assert np.all(sf.data[0] == 1.)
    assert np.all(new_sf.data[0] == 1.)

    # coordinates should also have been pickled
    assert np.all(sf.coordinates.data == new_sf.coordinates.data)

    assert sf.space_order == new_sf.space_order
    assert sf.dtype == new_sf.dtype
    assert sf.npoint == new_sf.npoint
Example #16
    def test_sparse_function(self, operate_on_empty_cache):
        """Test caching of SparseFunctions and children objects."""
        grid = Grid(shape=(3, 3))

        init_cache_size = len(_SymbolCache)
        cur_cache_size = len(_SymbolCache)

        u = SparseFunction(name='u', grid=grid, npoint=1, nt=10)

        # created: u, u(inds), p_u, h_p_u, u_coords, u_coords(inds), d, h_d
        ncreated = 8
        assert len(_SymbolCache) == cur_cache_size + ncreated

        cur_cache_size = len(_SymbolCache)

        i = u.inject(expr=u, field=u)

        # created: ii_u_0*2 (Symbol and ConditionalDimension), ii_u_1*2, ii_u_2*2,
        # ii_u_3*2, px, py, posx, posy, u_coords (as indexified),
        ncreated = 2+2+2+2+2+1+1+1
        # Note that injection is now lazy so no new symbols should be created
        assert len(_SymbolCache) == cur_cache_size
        i.evaluate

        assert len(_SymbolCache) == cur_cache_size + ncreated

        # No new symbolic objects are created
        u.inject(expr=u, field=u)
        assert len(_SymbolCache) == cur_cache_size + ncreated

        # Let's look at clear_cache now
        del u
        del i
        clear_cache()
        # At this point, not all children objects have been cleared. In particular, the
        # ii_u_* Symbols are still alive, as well as p_u and h_p_u. This is because
        # in the first clear_cache they were still referenced by their "parent" objects
        # (e.g., ii_u_* by ConditionalDimensions, through `condition`)
        assert len(_SymbolCache) == init_cache_size + 8
        clear_cache()
        # Now we should be back to the original state
        assert len(_SymbolCache) == init_cache_size
Example #17
def test_interpolation_dx():
    """
    Test interpolation of a SparseFunction from a Derivative of
    a Function.
    """
    u = unit_box(shape=(11, 11))
    sf1 = SparseFunction(name='s', grid=u.grid, npoint=1)
    sf1.coordinates.data[0, :] = (0.5, 0.5)

    op = Operator(sf1.interpolate(u.dx))

    assert sf1.data.shape == (1, )
    u.data[:] = 0.0
    u.data[5, 5] = 4.0
    u.data[4, 5] = 2.0
    u.data[6, 5] = 2.0

    op.apply()
    # The sparse point coincides with grid node (5, 5), so the interpolation
    # evaluates u.dx exactly at that node
    assert sf1.data[0] == pytest.approx(-20.0)
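One way to see the asserted value: the sparse point (0.5, 0.5) coincides with grid node (5, 5) of the 11x11 unit box (spacing 0.1), so the interpolation evaluates u.dx at that node; a first-order one-sided difference, (u[6, 5] - u[5, 5]) / h = (2.0 - 4.0) / 0.1 = -20.0, is consistent with the assertion (the exact stencil depends on u's space order).

h = 0.1
assert abs((2.0 - 4.0) / h - (-20.0)) < 1e-12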
Example #18
def custom_points(grid, ranges, npoints, name='points'):
    """Create a set of sparse points from a set of coordinate
    ranges for each spatial dimension.
    """
    scale = Dimension(name="scale")
    dim = Dimension(name="dim")
    points = SparseFunction(name=name, grid=grid, dimensions=(scale, dim),
                            shape=(3, npoints), npoint=npoints)
    for i, r in enumerate(ranges):
        points.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
    return points
Example #19
def test_scheduling_after_rewrite():
    """Tests loop scheduling after DSE-induced expression hoisting."""
    grid = Grid((10, 10))
    u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
    u2 = TimeFunction(name="u2", grid=grid, time_order=2)
    sf1 = SparseFunction(name='sf1', grid=grid, npoint=1, ntime=10)
    const = Function(name="const", grid=grid, space_order=2)

    # Deliberately inject into u1, rather than u1.forward, to create a WAR
    eqn1 = Eq(u1.forward, u1 + sin(const))
    eqn2 = sf1.inject(u1.forward, expr=sf1)
    eqn3 = Eq(u2.forward, u2 - u1.dt2 + sin(const))

    op = Operator([eqn1] + eqn2 + [eqn3])
    trees = retrieve_iteration_tree(op)

    # Check loop nest structure
    assert len(trees) == 4
    assert all(i.dim == j for i, j in zip(trees[0], grid.dimensions))  # time invariant
    assert trees[1][0].dim == trees[2][0].dim == trees[3][0].dim == grid.time_dim
Example #20
    def test_ownership(self, coords):
        """Given a sparse point ``p`` with known coordinates, this test checks
        that the MPI rank owning ``p`` is retrieved correctly."""
        grid = Grid(shape=(4, 4), extent=(4.0, 4.0))

        sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coords)

        # The domain decomposition is so that the i-th MPI rank gets exactly one
        # sparse point `p` and, incidentally, `p` is logically owned by `i`
        assert len(sf.gridpoints) == 1
        assert all(grid.distributor.glb_to_rank(i) == grid.distributor.myrank
                   for i in sf.gridpoints)
Example #21
def test_operator_leakage_sparse():
    """
    Test to ensure that :class:`Operator` creation does not cause
    memory leaks for :class:`SparseFunction` symbols.
    """
    grid = Grid(shape=(5, 6))
    a = Function(name='a', grid=grid)
    s = SparseFunction(name='s', grid=grid, npoint=1, nt=1)
    w_a = weakref.ref(a)
    w_s = weakref.ref(s)

    # Create operator and delete everything again
    op = Operator(s.interpolate(a))
    w_op = weakref.ref(op)
    del op
    del s
    del a
    clear_cache()

    # Test whether things are still hanging around
    assert w_a() is None
    assert w_s() is None
    assert w_op() is None
Example #22
    def test_scatter_gather(self):
        """
        Test scattering and gathering of sparse data from and to a single MPI rank.

        The initial data distribution looks like:

               rank0           rank1           rank2           rank3
            [0, 1, 2, 3]        []              []               []

        Logically (i.e., given point coordinates and domain decomposition), 0 belongs
        to rank0, 1 belongs to rank1, etc. Thus, after scattering, the data distribution
        is expected to be:

               rank0           rank1           rank2           rank3
                [0]             [1]             [2]             [3]

        Then, locally on each rank, some trivial computation is performed, and we obtain:

               rank0           rank1           rank2           rank3
                [0]             [2]             [4]             [6]

        Finally, we gather the data values and we get:

               rank0           rank1           rank2           rank3
            [0, 2, 4, 6]        []              []              []
        """
        grid = Grid(shape=(4, 4), extent=(4.0, 4.0))

        # Initialization
        if grid.distributor.myrank == 0:
            coords = [(1., 1.), (1., 3.), (3., 1.), (3., 3.)]
        else:
            coords = []
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = list(range(len(coords)))

        # Scatter
        data = sf._dist_scatter()[sf]
        assert len(data) == 1
        assert data[0] == grid.distributor.myrank

        # Do some local computation
        data = data * 2

        # Gather
        sf._dist_gather(data)
        if grid.distributor.myrank == 0:
            assert np.all(sf.data == [0, 2, 4, 6])
        else:
            assert not sf.data
Example #23
    def test_irregular_write(self):
        """
        Compute a simple stencil S w/o offloading it to YASK because of the presence
        of indirect write accesses (e.g. A[B[i]] = ...); YASK grid functions are however
        used in the generated code to access the data at the right location. This
        test checks that the numerical output is correct after this transformation.

        Initially, the input array (a YASK grid, under the hood), at t=0 is (2D view):

            0 1 2 3
            0 1 2 3
            0 1 2 3
            0 1 2 3

        Then, the Operator "flips" its content, and at timestep t=1 we get (2D view):

            3 2 1 0
            3 2 1 0
            3 2 1 0
            3 2 1 0
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim
        p = SparseFunction(name='points', grid=grid, nt=1, npoint=4)
        u = TimeFunction(name='yu4D', grid=grid, space_order=0)
        for i in range(4):
            for j in range(4):
                for k in range(4):
                    u.data[0, i, j, k] = k
        ind = lambda i: p.indexed[0, i]
        eqs = [
            Eq(p.indexed[0, 0], 3.),
            Eq(p.indexed[0, 1], 2.),
            Eq(p.indexed[0, 2], 1.),
            Eq(p.indexed[0, 3], 0.),
            Eq(u.indexed[t + 1, ind(x), ind(y), ind(z)], u.indexed[t, x, y, z])
        ]
        op = Operator(eqs, subs=grid.spacing_map)
        op(yu4D=u, time=2)
        assert 'run_solution' not in str(op)
        assert all(np.all(u.data[1, :, :, i] == 3 - i) for i in range(4))
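The assertion follows from the indirection: with p holding [3, 2, 1, 0], the write u[t+1, p[x], p[y], p[z]] = u[t, x, y, z] sends the value k stored at (i, j, k) to (3-i, 3-j, 3-k), so along the last axis the flipped field reads 3 - i. A plain-NumPy reproduction:

import numpy as np

u0 = np.fromfunction(lambda i, j, k: k, (4, 4, 4))
p = np.array([3, 2, 1, 0])
u1 = np.zeros_like(u0)
for i in range(4):
    for j in range(4):
        for k in range(4):
            u1[p[i], p[j], p[k]] = u0[i, j, k]
assert all(np.all(u1[:, :, i] == 3 - i) for i in range(4))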
Example #24
    def test_scatter_gather(self):
        """
        Test scattering and gathering of sparse data from and to a single MPI rank.

        The initial data distribution (A, B, C, and D are generic values) looks like:

               rank0           rank1           rank2           rank3
                [D]             [C]             [B]             [A]

        Logically (i.e., given point coordinates and domain decomposition), A belongs
        to rank0, B belongs to rank1, etc. Thus, after scattering, the data distribution
        is expected to be:

               rank0           rank1           rank2           rank3
                [A]             [B]             [C]             [D]

        Then, locally on each rank, a trivial *2 multiplication is performed:

               rank0           rank1           rank2           rank3
               [A*2]           [B*2]           [C*2]           [D*2]

        Finally, we gather the data values and we get:

               rank0           rank1           rank2           rank3
               [D*2]           [C*2]           [B*2]           [A*2]
        """
        grid = Grid(shape=(4, 4), extent=(4.0, 4.0))

        # Initialization
        data = np.array([3, 2, 1, 0])
        coords = np.array([(3., 3.), (3., 1.), (1., 3.), (1., 1.)])
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = data

        # Scatter
        loc_data = sf._dist_scatter()[sf]
        assert len(loc_data) == 1
        assert loc_data[0] == grid.distributor.myrank

        # Do some local computation
        loc_data = loc_data * 2

        # Gather
        sf._dist_gather(loc_data)
        assert len(sf.data) == 1
        assert np.all(sf.data == data[sf.local_indices] * 2)
Example #25
    def test_constants(self):
        """
        Check that :class:`Constant` objects are treated correctly.
        """
        grid = Grid(shape=(4, 4, 4))
        c = Constant(name='c', value=2.)
        p = SparseFunction(name='points', grid=grid, nt=1, npoint=1)
        u = TimeFunction(name='yu4D', grid=grid, space_order=0)
        u.data[:] = 0.
        op = Operator([Eq(u.forward, u + c), Eq(p.indexed[0, 0], 1. + c)])
        assert 'run_solution' in str(op)
        op.apply(yu4D=u, c=c, t=11)
        # Check YASK did its job and could read constant grids w/o problems
        assert np.all(u.data[0] == 20.)
        # Check the Constant could be read correctly even in Devito-land, i.e.,
        # outside of run_solution
        assert p.data[0][0] == 3.
        # Check re-executing with another constant gives the correct result
        c2 = Constant(name='c', value=5.)
        op.apply(yu4D=u, c=c2, t=4)
        assert np.all(u.data[0] == 30.)
        assert p.data[0][0] == 6.
Example #26
    def test_interpolation_dup(self):
        """
        Test interpolation operator when the sparse points are replicated over
        multiple MPI ranks.
        """
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        x, y = grid.dimensions

        # Init Function+data
        f = Function(name='f', grid=grid)
        glb_pos_map = grid.distributor.glb_pos_map
        if LEFT in glb_pos_map[x]:
            f.data[:] = [[1., 1.], [2., 2.]]
        else:
            f.data[:] = [[3., 3.], [4., 4.]]
        if grid.distributor.myrank == 0:
            coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
        else:
            coords = []
        sf = SparseFunction(name='sf',
                            grid=grid,
                            npoint=len(coords),
                            coordinates=coords)
        sf.data[:] = 0.

        # Global view (left) and local view (right, after domain decomposition)
        # O is a grid point
        # x is a halo point
        # A, B, C, D are sparse points
        #                               Rank0           Rank1
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |  A  |     |     |           |  A  |     |   |     |     |
        # O --- O --- O --- O           O --- O --- x   x --- O --- O
        # |     |  C  |  B  |     -->   |     |  C  |   |  C  |  B  |
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        # |     |  D  |     |           Rank2           Rank3
        # O --- O --- O --- O           x --- x --- x   x --- x --- x
        #                               |     |  C  |   |  C  |  B  |
        #                               O --- O --- x   x --- O --- O
        #                               |     |  D  |   |  D  |     |
        #                               O --- O --- x   x --- O --- O
        #
        # The initial `f.data` is (global view)
        #
        # 1. --- 1. --- 1. --- 1.
        # |      |      |      |
        # 2. --- 2. --- 2. --- 2.
        # |      |      |      |
        # 3. --- 3. --- 3. --- 3.
        # |      |      |      |
        # 4. --- 4. --- 4. --- 4.
        #
        # Expected `sf.data` (global view)
        #
        # 1.5 --- 2.5 --- 2.5 --- 3.5

        op = Operator(sf.interpolate(expr=f))
        op.apply()
        if grid.distributor.myrank == 0:
            assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5])
        else:
            assert sf.data.size == 0