def test_adjoint_inject_interpolate(self, shape, coords, npoints=19):
    """
    Verify the adjoint relation <P x, y> == <x, P^T y>, where P is the
    interpolation operator of a devito SparseFunction and P^T is injection.
    """
    grid = Grid(shape)
    adj_out = Function(name="a", grid=grid)
    adj_out.data[:] = 0.
    src_field = Function(name='c', grid=grid)
    src_field.data[:] = 27.
    assert adj_out.grid == src_field.grid

    def place_points(sparse):
        # Spread the sparse points along a line through the domain,
        # one linspace per dimension over the given coordinate ranges
        for dim, rng in enumerate(coords):
            sparse.coordinates.data[:, dim] = np.linspace(rng[0], rng[1], npoints)

    # Injecting receiver: writes a constant 1.2 into `a` (this is P^T y)
    p = SparseFunction(name="p", grid=grid, npoint=npoints)
    place_points(p)
    p.data[:] = 1.2
    inject_eqs = p.inject(field=adj_out, expr=p)

    # Reading receiver at the same locations: samples `c` (this is P x)
    p2 = SparseFunction(name="p2", grid=grid, npoint=npoints)
    place_points(p2)
    interp_eqs = p2.interpolate(expr=src_field)

    Operator(inject_eqs + interp_eqs)(a=adj_out, c=src_field)

    # Adjoint test: <P x, y> - <x, P^T y> must vanish (up to round-off)
    #   P x => p2,  y => p,  x => c,  P^T y => a
    lhs = np.dot(p2.data.reshape(-1), p.data.reshape(-1))
    rhs = np.dot(src_field.data.reshape(-1), adj_out.data.reshape(-1))
    assert np.isclose((lhs - rhs) / lhs, 0., atol=1.e-6)
def test_scheduling_sparse_functions(self):
    """Tests loop scheduling in presence of sparse functions."""
    grid = Grid((10, 10))
    time = grid.time_dim

    u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
    u2 = TimeFunction(name="u2", grid=grid, time_order=2)
    sf1 = SparseFunction(name='sf1', grid=grid, npoint=1, ntime=10)
    sf2 = SparseFunction(name='sf2', grid=grid, npoint=1, ntime=10)

    # Deliberately inject into u1, rather than u1.forward, to create a WAR w/ eqn3
    update_u1 = Eq(u1.forward, u1 + 2.0 - u1.backward)
    source = sf1.inject(u1, expr=sf1)
    update_u2 = Eq(u2.forward, u2 + 2*u2.backward - u1.dt2)
    receiver = sf2.interpolate(u2)

    op = Operator([update_u1] + source + [update_u2] + receiver)
    itrees = retrieve_iteration_tree(op)
    assert len(itrees) == 4
    # The WAR on u1 splits the time loop in two: the first two trees share
    # one time iteration, the last two share another, and the two halves
    # are distinct
    assert itrees[0][0].dim is time and itrees[0][0] is itrees[1][0]
    assert itrees[1][0] is not itrees[2][0]
    assert itrees[2][0].dim is time and itrees[2][0] is itrees[3][0]

    # Injecting into u1.forward removes the WAR, so now a single, shared
    # time loop is expected
    source = sf1.inject(u1.forward, expr=sf1)
    op = Operator([update_u1] + source + [update_u2] + receiver)
    itrees = retrieve_iteration_tree(op)
    assert len(itrees) == 4
    assert all(itrees[0][0] is i[0] for i in itrees)
def test_default_composite_functions(self):
    """
    Test the default argument derivation for composite functions.

    Builds an interpolation operator off a SparseFunction and verifies
    that the derived runtime arguments include the sparse data, the
    coordinates, and the default start/end/size for each of the sparse
    dimensions (`p`: point index, `d`: coordinate dimension, `time`).
    """
    grid = Grid(shape=(5, 6, 7))
    f = TimeFunction(name='f', grid=grid)
    s = SparseFunction(name='s', grid=grid, npoint=3, nt=4)
    # One coordinate column per grid dimension, 3 points each
    s.coordinates.data[:, 0] = np.arange(0., 3.)
    s.coordinates.data[:, 1] = np.arange(1., 4.)
    s.coordinates.data[:, 2] = np.arange(2., 5.)

    op = Operator(s.interpolate(f))

    expected = {
        's': s.data, 's_coords': s.coordinates.data,
        # Default dimensions of the sparse data
        'p_size': 3, 'p_s': 0, 'p_e': 3,
        # BUGFIX: these two keys used to be 'p_s'/'p_e' again — duplicate
        # dict keys silently overwrote the earlier pair, so the `d`
        # dimension's start/end were never actually checked
        'd_size': 3, 'd_s': 0, 'd_e': 3,
        'time_size': 4, 'time_s': 0, 'time_e': 4,
    }
    self.verify_arguments(op.arguments(), expected)
def test_interpolation_wodup(self):
    # Interpolation from a constant Function onto sparse points that are
    # not duplicated across MPI ranks: rank 0 owns all four points.
    grid = Grid(shape=(4, 4), extent=(3.0, 3.0))

    f = Function(name='f', grid=grid, space_order=0)
    f.data[:] = 4.
    if grid.distributor.myrank == 0:
        coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
    else:
        coords = []
    sf = SparseFunction(name='sf', grid=grid, npoint=len(coords),
                        coordinates=coords)
    sf.data[:] = 0.

    # This is the situation at this point
    # O is a grid point
    # * is a sparse point
    #
    # O --- O --- O --- O
    # |  *  |     |  *  |
    # O --- O --- O --- O
    # |     |     |     |
    # O --- O --- O --- O
    # |  *  |     |  *  |
    # O --- O --- O --- O

    op = Operator(sf.interpolate(expr=f))
    op.apply()
    # `f` is constant, so every sparse point must read back exactly 4
    assert np.all(sf.data == 4.)
def test_interpolation_dup(self):
    """
    Test interpolation operator when the sparse points are replicated over
    multiple MPI ranks.
    """
    grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
    x, y = grid.dimensions

    # Init Function+data
    f = Function(name='f', grid=grid)
    f.data[:] = np.array([[1, 1, 1, 1], [2, 2, 2, 2],
                          [3, 3, 3, 3], [4, 4, 4, 4]])
    # Same four coordinates are passed on every rank; ownership is
    # resolved by the domain decomposition
    coords = np.array([(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)])
    sf = SparseFunction(name='sf', grid=grid, npoint=len(coords),
                        coordinates=coords)
    sf.data[:] = 0.

    # Global view (left) and local view (right, after domain decomposition)
    # O is a grid point
    # x is a halo point
    # A, B, C, D are sparse points
    #                               Rank0           Rank1
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |  A  |     |     |           |  A  |     |   |     |     |
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |     |  C  |  B  |    -->    |     |  C  |   |  C  |  B  |
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    # |     |  D  |     |           Rank2           Rank3
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    #                               |     |  C  |   |  C  |  B  |
    #                               O --- O --- x   x --- O --- O
    #                               |     |  D  |   |  D  |     |
    #                               O --- O --- x   x --- O --- O
    #
    # The initial `f.data` is (global view)
    #
    # 1. --- 1. --- 1. --- 1.
    # |      |      |      |
    # 2. --- 2. --- 2. --- 2.
    # |      |      |      |
    # 3. --- 3. --- 3. --- 3.
    # |      |      |      |
    # 4. --- 4. --- 4. --- 4.
    #
    # Expected `sf.data` (global view)
    #
    # 1.5 --- 2.5 --- 2.5 --- 3.5

    op = Operator(sf.interpolate(expr=f))
    op.apply()
    # Each rank ends up owning one point; check the locally-owned value
    assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5][grid.distributor.myrank])
def test_edge_sparse():
    """
    Test that interpolation uses the correct point for the edge case where
    the sparse point sits exactly at the grid origin with non-rational grid
    spacing. Without factorization of the interpolation expressions,
    round-up error would make the interpolation read the halo point rather
    than the physical point (0, 0).
    """
    grid = Grid(shape=(16, 16), extent=(225., 225.), origin=(25., 35.))
    u = unit_box(shape=(16, 16), grid=grid)
    # Poison the halo so reading it (the buggy behaviour) is detectable
    u._data_with_outhalo[:u.space_order, :] = -1
    u._data_with_outhalo[:, :u.space_order] = -1

    sp = SparseFunction(name='s', grid=u.grid, npoint=1)
    sp.coordinates.data[0, :] = (25.0, 35.0)  # exactly the origin

    eqs = sp.interpolate(u)
    # Freeze the (non-rational) grid spacings as compile-time constants
    spacing_map = dict(zip((d.spacing for d in u.grid.dimensions),
                           u.grid.spacing))
    op = Operator(eqs, subs=spacing_map)
    op()

    assert sp.data[0] == 0
def test_interpolation_dx():
    """
    Test interpolation of a SparseFunction from a Derivative of a Function.
    """
    u = unit_box(shape=(11, 11))
    rec = SparseFunction(name='s', grid=u.grid, npoint=1)
    rec.coordinates.data[0, :] = (0.5, 0.5)
    assert rec.data.shape == (1, )

    # A single spike at the centre, flanked by two half-height neighbours
    u.data[:] = 0.0
    u.data[5, 5] = 4.0
    u.data[4, 5] = 2.0
    u.data[6, 5] = 2.0

    op = Operator(rec.interpolate(u.dx))
    op.apply()

    # Exactly in the middle of 4 points, only 1 nonzero is 4
    assert rec.data[0] == pytest.approx(-20.0)
def test_operator_leakage_sparse():
    """
    Test to ensure that :class:`Operator` creation does not cause
    memory leaks for :class:`SparseFunction` symbols.
    """
    grid = Grid(shape=(5, 6))
    dense = Function(name='a', grid=grid)
    sparse = SparseFunction(name='s', grid=grid, npoint=1, nt=1)

    # Weak references outlive the objects only if something leaks
    refs = [weakref.ref(dense), weakref.ref(sparse)]

    # Create operator and delete everything again
    op = Operator(sparse.interpolate(dense))
    refs.append(weakref.ref(op))
    del op, sparse, dense
    clear_cache()

    # Test whether things are still hanging around
    assert all(r() is None for r in refs)
def test_interpolation_dup(self):
    """
    Test interpolation operator when the sparse points are replicated over
    multiple MPI ranks.
    """
    grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
    x, y = grid.dimensions

    # Init Function+data; each rank fills only its local block, so the
    # global view is rows of 1s, 2s, 3s, 4s
    f = Function(name='f', grid=grid)
    glb_pos_map = grid.distributor.glb_pos_map
    if LEFT in glb_pos_map[x]:
        f.data[:] = [[1., 1.], [2., 2.]]
    else:
        f.data[:] = [[3., 3.], [4., 4.]]
    # All coordinates are provided by rank 0 only; the distributor is
    # expected to scatter them to their owning ranks
    if grid.distributor.myrank == 0:
        coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
    else:
        coords = []
    sf = SparseFunction(name='sf', grid=grid, npoint=len(coords),
                        coordinates=coords)
    sf.data[:] = 0.

    # Global view (left) and local view (right, after domain decomposition)
    # O is a grid point
    # x is a halo point
    # A, B, C, D are sparse points
    #                               Rank0           Rank1
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |  A  |     |     |           |  A  |     |   |     |     |
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |     |  C  |  B  |    -->    |     |  C  |   |  C  |  B  |
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    # |     |  D  |     |           Rank2           Rank3
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    #                               |     |  C  |   |  C  |  B  |
    #                               O --- O --- x   x --- O --- O
    #                               |     |  D  |   |  D  |     |
    #                               O --- O --- x   x --- O --- O
    #
    # The initial `f.data` is (global view)
    #
    # 1. --- 1. --- 1. --- 1.
    # |      |      |      |
    # 2. --- 2. --- 2. --- 2.
    # |      |      |      |
    # 3. --- 3. --- 3. --- 3.
    # |      |      |      |
    # 4. --- 4. --- 4. --- 4.
    #
    # Expected `sf.data` (global view)
    #
    # 1.5 --- 2.5 --- 2.5 --- 3.5

    op = Operator(sf.interpolate(expr=f))
    op.apply()
    # All results are gathered back on rank 0; other ranks hold no data
    if grid.distributor.myrank == 0:
        assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5])
    else:
        assert sf.data.size == 0