def test_scheduling_sparse_functions(self):
    """Tests loop scheduling in presence of sparse functions."""
    grid = Grid((10, 10))
    t_dim = grid.time_dim

    u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
    u2 = TimeFunction(name="u2", grid=grid, time_order=2)
    sf1 = SparseFunction(name='sf1', grid=grid, npoint=1, ntime=10)
    sf2 = SparseFunction(name='sf2', grid=grid, npoint=1, ntime=10)

    # Deliberately inject into u1, rather than u1.forward, to create a WAR w/ eqn3
    eqn1 = Eq(u1.forward, u1 + 2.0 - u1.backward)
    eqn2 = sf1.inject(u1, expr=sf1)
    eqn3 = Eq(u2.forward, u2 + 2*u2.backward - u1.dt2)
    eqn4 = sf2.interpolate(u2)

    itrees = retrieve_iteration_tree(Operator([eqn1] + eqn2 + [eqn3] + eqn4))
    assert len(itrees) == 4
    # The WAR prevents a single shared time loop: eqn1/eqn2 share one time
    # iteration, eqn3/eqn4 share another, and the two are distinct
    assert itrees[0][0].dim is t_dim
    assert itrees[0][0] is itrees[1][0]  # this IS shared
    assert itrees[1][0] is not itrees[2][0]
    assert itrees[2][0].dim is t_dim
    assert itrees[2][0] is itrees[3][0]  # this IS shared

    # Injecting into u1.forward instead removes the WAR, so now a single,
    # shared time loop is expected
    eqn2 = sf1.inject(u1.forward, expr=sf1)
    itrees = retrieve_iteration_tree(Operator([eqn1] + eqn2 + [eqn3] + eqn4))
    assert len(itrees) == 4
    assert all(tree[0] is itrees[0][0] for tree in itrees)
def test_adjoint_inject_interpolate(self, shape, coords, npoints=19):
    """
    Verify the adjoint relation < P x, y > == < x, P^T y > between
    SparseFunction.interpolate (P) and SparseFunction.inject (P^T)
    for a devito SparseFunction p.
    """
    grid = Grid(shape)
    a = Function(name="a", grid=grid)
    c = Function(name='c', grid=grid)
    a.data[:] = 0.
    c.data[:] = 27.
    assert a.grid == c.grid

    # Receiver injecting into `a` (this realises P^T y)
    p = SparseFunction(name="p", grid=grid, npoint=npoints)
    for d, r in enumerate(coords):
        p.coordinates.data[:, d] = np.linspace(r[0], r[1], npoints)
    p.data[:] = 1.2
    inject_expr = p.inject(field=a, expr=p)

    # Receiver reading from `c` at the same physical locations (this realises P x)
    p2 = SparseFunction(name="p2", grid=grid, npoint=npoints)
    for d, r in enumerate(coords):
        p2.coordinates.data[:, d] = np.linspace(r[0], r[1], npoints)
    interp_expr = p2.interpolate(expr=c)

    Operator(inject_expr + interp_expr)(a=a, c=c)

    # < P x, y > - < x, P^T y >
    # Px => p2
    # y => p
    # x => c
    # P^T y => a
    term1 = np.dot(p2.data.reshape(-1), p.data.reshape(-1))
    term2 = np.dot(c.data.reshape(-1), a.data.reshape(-1))
    assert np.isclose((term1 - term2) / term1, 0., atol=1.e-6)
def test_override_composite_data(self):
    """
    Check that overriding a SparseFunction at `Operator.arguments()` time
    makes the operator pick up the coordinates of the replacement object
    rather than those of the original one.
    """
    i, j = dimify('i j')
    grid = Grid(shape=(10, 10), dimensions=(i, j))
    original_coords = (1., 1.)
    new_coords = (2., 2.)
    p_dim = Dimension('p_src')
    u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)
    # NOTE(review): src2 reuses the name 'src1' — presumably deliberate so the
    # name-based override below matches; confirm against the argument machinery
    src1 = SparseFunction(name='src1', grid=grid, dimensions=[time, p_dim],
                          npoint=1, nt=10, coordinates=original_coords)
    src2 = SparseFunction(name='src1', grid=grid, dimensions=[time, p_dim],
                          npoint=1, nt=10, coordinates=new_coords)
    op = Operator(src1.inject(u, src1))

    # Move the source from the location where the setup put it so we can test
    # whether the override picks up the original coordinates or the changed ones
    # Operator.arguments() returns a tuple of (data, dimension_sizes)
    op_args = op.arguments(src1=src2)[0]
    assert np.array_equal(op_args[src1.name + "_coords"],
                          np.asarray((new_coords, )))
def test_injection_wodup(self):
    """
    Test injection operator when the sparse points don't need to be replicated
    ("wodup" -> w/o duplication) over multiple MPI ranks.
    """
    grid = Grid(shape=(4, 4), extent=(3.0, 3.0))

    f = Function(name='f', grid=grid, space_order=0)
    f.data[:] = 0.

    # Only rank 0 owns sparse points; every other rank carries none
    if grid.distributor.myrank == 0:
        coords = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
    else:
        coords = []
    sf = SparseFunction(name='sf', grid=grid, npoint=len(coords),
                        coordinates=coords)
    sf.data[:] = 4.

    # This is the situation at this point
    # O is a grid point
    # * is a sparse point
    #
    # O --- O --- O --- O
    # |  *  |     |  *  |
    # O --- O --- O --- O
    # |     |     |     |
    # O --- O --- O --- O
    # |  *  |     |  *  |
    # O --- O --- O --- O

    Operator(sf.inject(field=f, expr=sf + 1)).apply()

    assert np.all(f.data == 1.25)
def test_injection_dup(self):
    """
    Test injection operator when the sparse points are replicated over
    multiple MPI ranks.
    """
    grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
    x, y = grid.dimensions

    f = Function(name='f', grid=grid)
    f.data[:] = 0.

    # Sparse points are defined on rank 0 only; the decomposition (see the
    # diagram below) duplicates those lying on subdomain boundaries
    if grid.distributor.myrank == 0:
        coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
    else:
        coords = []
    sf = SparseFunction(name='sf', grid=grid, npoint=len(coords),
                        coordinates=coords)
    sf.data[:] = 4.

    # Global view (left) and local view (right, after domain decomposition)
    # O is a grid point
    # x is a halo point
    # A, B, C, D are sparse points
    #                               Rank0           Rank1
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |  A  |     |     |           |  A  |     |   |     |     |
    # O --- O --- O --- O           O --- O --- x   x --- O --- O
    # |     |  C  |  B  |    -->    |     |  C  |   |  C  |  B  |
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    # |     |  D  |     |           Rank2           Rank3
    # O --- O --- O --- O           x --- x --- x   x --- x --- x
    #                               |     |  C  |   |  C  |  B  |
    #                               O --- O --- x   x --- O --- O
    #                               |     |  D  |   |  D  |     |
    #                               O --- O --- x   x --- O --- O
    #
    # Expected `f.data` (global view)
    #
    # 1.25 --- 1.25 --- 0.00 --- 0.00
    #  |        |        |        |
    # 1.25 --- 2.50 --- 2.50 --- 1.25
    #  |        |        |        |
    # 0.00 --- 2.50 --- 3.75 --- 1.25
    #  |        |        |        |
    # 0.00 --- 1.25 --- 1.25 --- 0.00

    op = Operator(sf.inject(field=f, expr=sf + 1))
    op.apply()

    # Each rank checks its own quadrant of the expected global view above
    glb_pos_map = grid.distributor.glb_pos_map
    if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:  # rank0
        assert np.all(f.data_ro_domain == [[1.25, 1.25], [1.25, 2.5]])
    elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:  # rank1
        assert np.all(f.data_ro_domain == [[0., 0.], [2.5, 1.25]])
    elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:  # rank2
        assert np.all(f.data_ro_domain == [[0., 2.5], [0., 1.25]])
    elif RIGHT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:  # rank3
        assert np.all(f.data_ro_domain == [[3.75, 1.25], [1.25, 0.]])
def test_sparse_function(self, operate_on_empty_cache):
    """Test caching of SparseFunctions and children objects."""
    grid = Grid(shape=(3, 3))

    init_cache_size = len(_SymbolCache)
    cur_cache_size = len(_SymbolCache)

    u = SparseFunction(name='u', grid=grid, npoint=1, nt=10)

    # created: u, u(inds), p_u, h_p_u, u_coords, u_coords(inds), d, h_d
    ncreated = 8
    assert len(_SymbolCache) == cur_cache_size + ncreated

    cur_cache_size = len(_SymbolCache)

    i = u.inject(expr=u, field=u)
    # created: ii_u_0*2 (Symbol and ConditionalDimension), ii_u_1*2, ii_u_2*2,
    # ii_u_3*2, px, py, posx, posy, u_coords (as indexified),
    ncreated = 2+2+2+2+2+1+1+1
    # Note that injection is now lazy so no new symbols should be created
    assert len(_SymbolCache) == cur_cache_size
    # Bare attribute access on purpose: `.evaluate` triggers the (lazy)
    # lowering, which is what creates the symbols counted above
    i.evaluate
    assert len(_SymbolCache) == cur_cache_size + ncreated

    # No new symbolic objects are created
    u.inject(expr=u, field=u)
    assert len(_SymbolCache) == cur_cache_size + ncreated

    # Let's look at clear_cache now
    del u
    del i
    clear_cache()
    # At this point, not all children objects have been cleared. In particular, the
    # ii_u_* Symbols are still alive, as well as p_u and h_p_u. This is because
    # in the first clear_cache they were still referenced by their "parent" objects
    # (e.g., ii_u_* by ConditionalDimensions, through `condition`)
    assert len(_SymbolCache) == init_cache_size + 8
    clear_cache()
    # Now we should be back to the original state
    assert len(_SymbolCache) == init_cache_size
def test_scheduling_after_rewrite():
    """Tests loop scheduling after DSE-induced expression hoisting."""
    grid = Grid((10, 10))

    u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
    u2 = TimeFunction(name="u2", grid=grid, time_order=2)
    sf1 = SparseFunction(name='sf1', grid=grid, npoint=1, ntime=10)
    const = Function(name="const", grid=grid, space_order=2)

    # NOTE(review): the original comment claimed injection into u1 (not
    # u1.forward) to create a WAR, but the code injects into u1.forward —
    # confirm which dependence the schedule below actually hinges on
    eqns = [Eq(u1.forward, u1 + sin(const))]
    eqns += sf1.inject(u1.forward, expr=sf1)
    eqns.append(Eq(u2.forward, u2 - u1.dt2 + sin(const)))

    trees = retrieve_iteration_tree(Operator(eqns))

    # Check loop nest structure: one time-invariant nest (the hoisted
    # sin(const)), then three nests rooted at the time loop
    assert len(trees) == 4
    assert all(i.dim == j for i, j in zip(trees[0], grid.dimensions))  # time invariant
    t_dim = grid.time_dim
    assert trees[1][0].dim == t_dim
    assert trees[2][0].dim == t_dim
    assert trees[3][0].dim == t_dim