def test_if_halo_mpi(self, nbl):
    """
    Check that `initialize_function` pads the FD halo as well under MPI:
    each rank's local view (outer halo included) must equal the
    edge-padded quadrant of the source array that rank owns.
    """
    grid = Grid((10, 10))
    x, y = grid.dimensions
    glb_pos_map = grid.distributor.glb_pos_map
    f = Function(name="f", grid=grid)

    n = 10 - 2 * nbl
    src = np.zeros((n, n))
    src[:, 0] = 1
    src[:, -1] = 3
    src[0, :] = 2
    src[-1, :] = 4
    initialize_function(f, src, nbl)

    # Select this rank's quadrant of `src` and the per-dimension pad
    # widths; only the domain-boundary sides receive padding.
    h = n // 2
    if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
        quad, widths = src[:h, :h], [(1 + nbl, 0), (1 + nbl, 0)]
    elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
        quad, widths = src[:h, h:], [(1 + nbl, 0), (0, 1 + nbl)]
    elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
        quad, widths = src[h:, :h], [(0, 1 + nbl), (1 + nbl, 0)]
    else:
        quad, widths = src[h:, h:], [(0, 1 + nbl), (0, 1 + nbl)]

    expected = np.pad(quad, widths, 'edge')
    assert np.all(f._data_with_outhalo._local == expected)
def test_if_halo(self, ndim, nbl):
    """
    Check that `initialize_function` pads the FD halo as well: the
    outer-halo view must carry the edge values of the source array.
    """
    grid = Grid((11,) * ndim)
    f = Function(name="f", grid=grid)

    # Source array with distinct markers on each face; the face
    # assignments after the first overwrite shared edges/corners, so
    # the order below matters.
    src = np.zeros((11 - 2 * nbl,) * ndim)
    src[..., 0] = 1
    src[..., -1] = 3
    if ndim == 3:
        src[:, 0, :] = 5
        src[:, -1, :] = 6
    src[0, ...] = 2
    src[-1, ...] = 4
    initialize_function(f, src, nbl)

    halo = f._data_with_outhalo
    assert np.all(np.take(halo, 0, axis=0) == 2)
    assert np.all(np.take(halo, -1, axis=0) == 4)
    if ndim == 3:
        # Interior stays untouched; every face carries its marker.
        assert halo[7, 7, 7] == 0
        assert np.take(halo, 0, axis=-1)[7, 7] == 1
        assert np.take(halo, -1, axis=-1)[7, 7] == 3
        assert np.take(halo, 0, axis=1)[7, 7] == 5
        assert np.take(halo, -1, axis=1)[7, 7] == 6
    else:
        assert halo[7, 7] == 0
        assert np.take(halo, 0, axis=-1)[7] == 1
        assert np.take(halo, -1, axis=-1)[7] == 3
def test_nbl_zero(self):
    """`initialize_function` with nbl=0 must copy the data verbatim."""
    src = np.arange(16).reshape((4, 4))
    grid = Grid(shape=(4, 4))
    f = Function(name='f', grid=grid, dtype=np.int32)
    initialize_function(f, src, 0)
    # No padding requested: the Function holds exactly the input array.
    assert np.all(np.array(f.data[:]) == src)
def _gen_phys_param(self, field, name, space_order, default_value=0): if field is None: return default_value if isinstance(field, np.ndarray): function = Function(name=name, grid=self.grid, space_order=space_order) initialize_function(function, field, self.nbl) else: function = Constant(name=name, value=field) return function
def test_if_serial(self):
    """Serial check of `initialize_function` with mode='reflect'."""
    src = np.arange(16).reshape((4, 4))
    grid = Grid(shape=(12, 12))
    f = Function(name='f', grid=grid, dtype=np.int32)
    initialize_function(f, src, 4, mode='reflect')

    # Each padded strip must mirror the interior data.
    assert np.all(np.array(f.data[4:8, 0:4]) == src[:, ::-1])
    assert np.all(np.array(f.data[4:8, 8:12]) == src[:, ::-1])
    assert np.all(np.array(f.data[0:4, 4:8]) == src[::-1, :])
    assert np.all(np.array(f.data[8:12, 4:8]) == src[::-1, :])
def test_if_serial_asymmetric(self):
    """Serial check of `initialize_function` with per-side pad widths."""
    src = np.arange(35).reshape((7, 5))
    grid = Grid(shape=(12, 12))
    f = Function(name='f', grid=grid, dtype=np.int32)
    # Pad widths are ((left_x, right_x), (left_y, right_y)).
    initialize_function(f, src, ((2, 3), (4, 3)), mode='reflect')

    # Each padded strip must be the reflection of the matching interior
    # rows/columns, clipped to that side's pad width.
    assert np.all(np.array(f.data[2:9, 0:4]) == src[:, -2::-1])
    assert np.all(np.array(f.data[2:9, 9:12]) == src[:, :1:-1])
    assert np.all(np.array(f.data[0:2, 4:9]) == src[1::-1, :])
    assert np.all(np.array(f.data[9:12, 4:9]) == src[6:3:-1, :])
def _gen_phys_param(self, field, name, space_order, is_param=True, default_value=0): if field is None: return default_value if isinstance(field, np.ndarray): function = Function(name=name, grid=self.grid, space_order=space_order, parameter=is_param) initialize_function(function, field, self.padsizes) else: function = Constant(name=name, value=field, dtype=self.grid.dtype) self._physical_parameters.update([name]) return function
def forward(ctx, input, model, geometry, solver, device):
    """
    Run nonlinear forward modeling with the given input, stashing the
    model/geometry/solver/device and the saved wavefield on `ctx` for
    the backward pass.
    """
    ctx.model = model
    ctx.geometry = geometry
    ctx.solver = solver
    ctx.device = device

    # Detach the (1, 1, ...) torch tensor and pull it onto the host.
    host_input = input[0, 0, ...].detach().cpu().numpy()

    # input**(-0.5) — presumably squared slowness to velocity; confirm
    # against the caller's convention.
    vp = Function(name='vp', grid=ctx.model.grid,
                  space_order=ctx.model.space_order)
    initialize_function(vp, host_input**(-0.5), ctx.model.nbl)
    ctx.model.vp = vp

    # Nonlinear forward modeling, saving the wavefield for backward.
    d_nonlin, ctx.u0 = ctx.solver.forward(save=True)[:2]
    return torch.from_numpy(np.array(d_nonlin.data)).to(ctx.device)
def test_if_parallel(self):
    """MPI check of `initialize_function` with mode='reflect'."""
    src = np.arange(36).reshape((6, 6))
    grid = Grid(shape=(18, 18))
    x, y = grid.dimensions
    glb_pos_map = grid.distributor.glb_pos_map
    f = Function(name='f', grid=grid, halo=((3, 3), (3, 3)), dtype=np.int32)
    initialize_function(f, src, 6, mode='reflect')

    # For this rank's quadrant, pair each reflected slice of `src`
    # with the local region of `f` that must equal it.
    if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
        checks = [(src[::-1, 0:3], f.data[0:6, 6:9]),
                  (src[0:3, ::-1], f.data[6:9, 0:6])]
    elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]:
        checks = [(src[::-1, 3:6], f.data[0:6, 9:12]),
                  (src[0:3, ::-1], f.data[6:9, 12:18])]
    elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]:
        checks = [(src[::-1, 0:3], f.data[12:18, 6:9]),
                  (src[3:6, ::-1], f.data[9:12, 0:6])]
    else:
        checks = [(src[::-1, 3:6], f.data[12:18, 9:12]),
                  (src[3:6, ::-1], f.data[9:12, 12:18])]

    for expected, actual in checks:
        assert np.all(expected == np.array(actual))
def vs(self, vs):
    """
    Set a new velocity model.

    Parameters
    ----------
    vs : float or array
        New velocity in km/s. An array may be sized either like the
        physical domain (it is then padded via `initialize_function`)
        or like the already-padded Function (copied verbatim).
    """
    if not isinstance(vs, np.ndarray):
        # Scalar value: assign straight to the underlying data.
        self._vs.data = vs
        return
    if vs.shape == self.vs.shape:
        # Padded shape: copy straight in.
        self.vs.data[:] = vs[:]
    elif vs.shape == self.shape:
        # Physical-domain shape: re-pad with the absorbing layers.
        initialize_function(self._vs, vs, self.nbl)
    else:
        raise ValueError("Incorrect input size %s for model of size" % vs.shape +
                         " %s without or %s with padding" % (self.shape,
                                                             self.vs.shape))
def update(self, name, value):
    """
    Update the physical parameter `name` with `value`, creating it
    first if no parameter with that name exists yet.

    Parameters
    ----------
    name : str
        Name of the physical parameter.
    value : float or np.ndarray
        New value. An array may be sized either like the physical
        domain (it is then padded via `initialize_function`) or like
        the already-padded Function (copied verbatim).

    Raises
    ------
    ValueError
        If `value` is an array whose shape matches neither the padded
        nor the unpadded model shape.
    """
    try:
        param = getattr(self, name)
    except AttributeError:
        # No physical parameter with that name yet: create it.
        # NOTE: `_gen_phys_param` takes the field values first, then the
        # name — the original call passed (name, value) swapped, handing
        # the string to the `field` argument.
        setattr(self, name,
                self._gen_phys_param(value, name, self.space_order))
        return
    # Update the existing parameter according to the new value
    if isinstance(value, np.ndarray):
        if value.shape == param.shape:
            param.data[:] = value[:]
        elif value.shape == self.shape:
            initialize_function(param, value, self.nbl)
        else:
            raise ValueError("Incorrect input size %s for model" % value.shape +
                             " %s without or %s with padding" % (self.shape,
                                                                 param.shape))
    else:
        param.data = value