def get_reference(test_name, backend, domain, origins, shapes, masks=None):
    """Build reference data for *test_name* on *backend*.

    Scalars are wrapped as ``np.float_``; arrays become gt4py storages whose
    origin/shape are looked up either under the field name itself or, on a
    ``KeyError``, under the name with its ``"_reference"`` suffix stripped.
    """
    reference_data = reference_module.__dict__[test_name](*domain)
    result = {}
    for name, values in reference_data.items():
        if np.isscalar(values):
            result[name] = np.float_(values)
            continue
        try:
            field = gt_store.from_array(
                values,
                dtype=np.float_,
                default_origin=origins[name],
                shape=shapes[name],
                backend=backend.name,
            )
        except KeyError:
            # Fall back to the base field name (without "_reference").
            stripped = name[: -len("_reference")]
            field = gt_store.from_array(
                values,
                dtype=np.float_,
                default_origin=origins[stripped],
                shape=shapes[stripped],
                backend=backend.name,
            )
        result[name] = field
    return result
def test_lower_dimensional_masked_2dcond(backend):
    """A 2D input is copied into a 3D output only where a 3D condition holds."""

    @gtscript.stencil(backend=backend)
    def copy_2to3(
        cond: gtscript.Field[gtscript.IJK, np.float_],
        inp: gtscript.Field[gtscript.IJ, np.float_],
        outp: gtscript.Field[gtscript.IJK, np.float_],
    ):
        with computation(FORWARD), interval(...):
            if cond > 0.0:
                outp = inp

    inp_np = np.random.randn(10, 10)
    outp_np = np.random.randn(10, 10, 10)
    cond_np = np.random.randn(10, 10, 10)

    inp_f = gt_storage.from_array(inp_np, default_origin=(0, 0), backend=backend)
    outp_f = gt_storage.from_array(outp_np, default_origin=(0, 0, 0), backend=backend)
    cond_f = gt_storage.from_array(cond_np, default_origin=(0, 0, 0), backend=backend)

    copy_2to3(cond_f, inp_f, outp_f)

    # Expected: broadcast the 2D input along k, then pick per point by cond.
    expected = np.where(cond_np > 0.0, inp_np[:, :, np.newaxis], outp_np)
    outp_f.device_to_host()
    assert np.allclose(expected, np.asarray(outp_f))
def test_copy_cpu(method, backend):
    """Copies of a CPU storage must own a disjoint buffer with equal contents.

    Args:
        method: ``"deepcopy"`` (via :mod:`copy`) or ``"copy_method"``
            (via ``Storage.copy()``).
        backend: Backend name used to allocate the storage.

    Fixed: the original ``else`` branch asserted ``a + len(a) >= b`` with
    ``a > b``, which is vacuously true and never checked disjointness in that
    direction; both directions now assert real non-overlap.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10), default_origin=(1, 1, 1), backend=backend
    )

    if method == "deepcopy":
        stor_copy = copy.deepcopy(stor)
    elif method == "copy_method":
        stor_copy = stor.copy()
    else:
        raise ValueError(f"Test not implemented for copying using '{method}'")

    assert stor is not stor_copy

    addr_a = stor._raw_buffer.ctypes.data
    addr_b = stor_copy._raw_buffer.ctypes.data
    assert addr_a != addr_b
    # The raw buffers must occupy disjoint address ranges.
    if addr_a < addr_b:
        assert addr_a + len(stor._raw_buffer) <= addr_b
    else:
        assert addr_b + len(stor_copy._raw_buffer) <= addr_a

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
def test_deepcopy_gpu_unmanaged(backend="gtcuda"):
    """deepcopy of an unmanaged GPU storage duplicates host and device buffers.

    Fixed: the reversed-order branches of the overlap checks were vacuous
    (``a + len(a) >= b`` is always true when ``a > b``); both directions now
    assert real disjointness.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10),
        default_origin=(1, 1, 1),
        backend=backend,
        managed_memory=False,
    )
    stor_copy = copy.deepcopy(stor)

    assert stor is not stor_copy
    assert stor._sync_state is not stor_copy._sync_state

    def _assert_disjoint(addr_a, size_a, addr_b, size_b):
        # Two buffers must occupy non-overlapping address ranges.
        assert addr_a != addr_b
        if addr_a < addr_b:
            assert addr_a + size_a <= addr_b
        else:
            assert addr_b + size_b <= addr_a

    # Host buffers.
    _assert_disjoint(
        stor._raw_buffer.ctypes.data,
        len(stor._raw_buffer),
        stor_copy._raw_buffer.ctypes.data,
        len(stor_copy._raw_buffer),
    )
    # Device buffers.
    _assert_disjoint(
        stor._device_raw_buffer.data.ptr,
        len(stor._device_raw_buffer),
        stor_copy._device_raw_buffer.data.ptr,
        len(stor_copy._device_raw_buffer),
    )

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
    assert (stor._device_field[...] == stor_copy._device_field[...]).all()
def test_copy_gpu(method, backend="gtcuda"):
    """Copies of a managed-memory GPU storage own disjoint, equal buffers.

    Args:
        method: ``"deepcopy"`` (via :mod:`copy`) or ``"copy_method"``
            (via ``Storage.copy()``).
        backend: GPU backend name used to allocate the storage.

    Fixed: the original ``else`` branch asserted ``a + len(a) >= b`` with
    ``a > b``, which is vacuously true; both directions now assert real
    non-overlap of the device buffers.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10),
        default_origin=(1, 1, 1),
        backend=backend,
        managed_memory=True,
    )

    if method == "deepcopy":
        stor_copy = copy.deepcopy(stor)
    elif method == "copy_method":
        stor_copy = stor.copy()
    else:
        raise ValueError(f"Test not implemented for copying using '{method}'")

    assert stor is not stor_copy

    ptr_a = stor._raw_buffer.data.ptr
    ptr_b = stor_copy._raw_buffer.data.ptr
    assert ptr_a != ptr_b
    # The device buffers must occupy disjoint address ranges.
    if ptr_a < ptr_b:
        assert ptr_a + len(stor._raw_buffer) <= ptr_b
    else:
        assert ptr_b + len(stor_copy._raw_buffer) <= ptr_a

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
def test_lower_dimensional_inputs_2d_to_3d_forward(backend):
    """Copying a 2D (IJ) field into a 3D (IJK) field replicates it along k."""

    @gtscript.stencil(backend=backend)
    def copy_2to3(
        inp: gtscript.Field[gtscript.IJ, np.float_],
        outp: gtscript.Field[gtscript.IJK, np.float_],
    ):
        with computation(FORWARD), interval(...):
            outp[0, 0, 0] = inp

    inp_f = gt_storage.from_array(
        np.random.randn(10, 10), default_origin=(0, 0), backend=backend
    )
    outp_f = gt_storage.from_array(
        np.random.randn(10, 10, 10), default_origin=(0, 0, 0), backend=backend
    )

    copy_2to3(inp_f, outp_f)

    inp_f.device_to_host()
    outp_f.device_to_host()
    # Every k-level of the output equals the 2D input.
    assert np.allclose(np.asarray(outp_f), np.asarray(inp_f)[:, :, np.newaxis])
def test_origin_offsetting_nofrozen(dace_stencil, domain, outp_origin):
    """A non-frozen stencil called inside a dace.program honors per-field origins."""
    backend = dace_stencil.backend

    inp = gt_storage.from_array(
        data=7.0,
        dtype=np.float64,
        shape=(10, 10, 10),
        default_origin=(0, 0, 0),
        backend=backend,
    )
    outp = gt_storage.zeros(
        dtype=np.float64, shape=(10, 10, 10), default_origin=(0, 0, 0), backend=backend
    )
    origin = {"inp": (0, 0, 0), "outp": outp_origin}

    inp.host_to_device()
    outp.host_to_device()

    @dace.program(
        device=dace.DeviceType.GPU if "gpu" in backend else dace.DeviceType.CPU
    )
    def call_stencil_object():
        dace_stencil(inp=inp, outp=outp, domain=domain, origin=origin)

    call_stencil_object()
    outp.device_to_host(force=True)

    assert np.allclose(inp, 7.0)
    # The box starting at outp_origin with extent `domain` holds 7.0 ...
    box = tuple(slice(o, o + d) for o, d in zip(outp_origin, domain))
    assert np.allclose(np.asarray(outp)[box], 7.0)
    # ... and nothing outside it was written: total sum equals the box sum.
    assert np.sum(np.asarray(outp), axis=(0, 1, 2)) == np.prod(domain) * 7.0
def test_origin_k_fields(backend):
    """A K-only input field honors its 1D origin when broadcast to IJK output."""

    @gtscript.stencil(backend=backend, rebuild=True)
    def k_to_ijk(outp: Field[np.float64], inp: Field[gtscript.K, np.float64]):
        with computation(PARALLEL), interval(...):
            outp = inp

    data = np.arange(10, dtype=np.float64)
    inp = gt_storage.from_array(
        data=data,
        shape=(10,),
        default_origin=(0,),
        dtype=np.float64,
        mask=[False, False, True],
        backend=backend,
    )
    outp = gt_storage.zeros(
        shape=(2, 2, 10), default_origin=(0, 0, 0), dtype=np.float64, backend=backend
    )

    k_to_ijk(outp, inp, origin={"outp": (0, 0, 1), "inp": (2,)}, domain=(2, 2, 8))

    inp.device_to_host()
    outp.device_to_host()
    # The input is untouched.
    np.testing.assert_allclose(data, np.asarray(inp))
    # Interior k-levels hold data[2:] broadcast over (i, j); the first and
    # last k-levels stay zero.
    np.testing.assert_allclose(
        np.broadcast_to(data[2:], shape=(2, 2, 8)), np.asarray(outp)[:, :, 1:-1]
    )
    np.testing.assert_allclose(0.0, np.asarray(outp)[:, :, 0])
    np.testing.assert_allclose(0.0, np.asarray(outp)[:, :, -1])
def netcdf_to_gt4py(self, var):
    """Convert a netcdf variable to a gt4py storage.

    Scalar variables (a single element) are returned as plain Python values.
    """
    axes = [d.name for d in var.get_dims()]
    idim = self._find_fuzzy(axes, "xaxis")
    jdim = self._find_fuzzy(axes, "yaxis")
    kdim = self._find_fuzzy(axes, "zaxis")
    if np.prod(var.shape) <= 1:
        # Scalar: unwrap to a Python value.
        return var[0].item()
    # Put (i, j, k) first, remaining axes after.
    # NOTE(review): `if dim` also drops a legitimate axis index 0 — confirm
    # _find_fuzzy never returns 0 for a matched axis.
    permutation = [dim for dim in (idim, jdim, kdim) if dim]
    for axis_index in range(len(axes)):
        if axis_index not in permutation:
            permutation.append(axis_index)
    ndarray = np.squeeze(np.transpose(var, permutation))
    if ndarray.ndim == 3:
        origin = (self.ng, self.ng, 0)
    elif ndarray.ndim == 2:
        origin = (self.ng, self.ng)
    else:
        origin = (0,)
    return gt_storage.from_array(
        ndarray, backend, default_origin=origin, shape=ndarray.shape
    )
def make_storage_data(
    data: Field,
    shape: Optional[Tuple[int, int, int]] = None,
    *,
    origin: Tuple[int, int, int] = origin,
    dtype: DTypes = np.float64,
    mask: Tuple[bool, bool, bool] = (True, True, True),
    start: Tuple[int, int, int] = (0, 0, 0),
    dummy: Optional[Tuple[int, int, int]] = None,
    axis: int = 2,
) -> Field:
    """Create a new gt4py storage from *data*.

    1D and 2D inputs are first expanded by the corresponding
    ``_make_storage_data_*`` helper (using *start*, *dummy* and *axis*);
    3D inputs are sliced/copied using *start* only.

    Args:
        data: Data array for the new storage.
        shape: Shape of the new storage (defaults to ``data.shape``).
        origin: Default origin for gt4py stencil calls.
        dtype: Data type.
        mask: Tuple indicating the axes used when initializing the storage.
        start: Starting points for slices in data copies.
        dummy: Dummy axes.
        axis: Axis for 2D to 3D arrays.

    Returns:
        Field[dtype]: New storage.

    Examples:
        1) ptop = utils.make_storage_data(top_p, q4_1.shape)
        2) ws3 = utils.make_storage_data(ws3[:, :, -1], shape, origin=(0, 0, 0))
        3) data_dict[names[i]] = make_storage_data(
               data[:, :, :, i], shape, origin=origin, start=start,
               dummy=dummy, axis=axis,
           )
    """
    rank = len(data.shape)
    if shape is None:
        shape = data.shape
    if rank == 1:
        expanded = _make_storage_data_1d(data, shape, start, dummy, axis)
    elif rank == 2:
        expanded = _make_storage_data_2d(data, shape, start, dummy, axis)
    else:
        expanded = _make_storage_data_3d(data, shape, start)
    return gt_storage.from_array(
        data=expanded,
        backend=global_config.get_backend(),
        default_origin=origin,
        shape=shape,
        dtype=dtype,
        mask=mask,
        managed_memory=managed_memory,
    )
def netcdf_to_gt4py(self, var):
    """Convert a netcdf variable to a gt4py storage, padding or broadcasting
    the vertical axis to ``self.km`` levels where needed."""
    axes = [d.name for d in var.get_dims()]
    idim = self._find_fuzzy(axes, "xaxis")
    jdim = self._find_fuzzy(axes, "yaxis")
    kdim = self._find_fuzzy(axes, "zaxis")
    origin = (self.ng, self.ng, 0)
    if np.prod(var.shape) > 1:
        # Put (i, j, k) first, remaining axes after.
        # NOTE(review): `if dim` also drops a legitimate axis index 0 —
        # confirm _find_fuzzy never returns 0 for a matched axis.
        permutation = [dim for dim in (idim, jdim, kdim) if dim]
        for axis_index in range(len(axes)):
            if axis_index not in permutation:
                permutation.append(axis_index)
        ndarray = np.squeeze(np.transpose(var, permutation))
        if ndarray.ndim == 3:
            if ndarray.shape[2] < self.km:
                # Zero-pad the vertical axis up to km levels. The negative
                # stop (shape[2] - km) selects the first shape[2] levels.
                padded = np.zeros(
                    (self.full_domain_nx, self.full_domain_ny, self.km)
                )
                padded[:, :, : ndarray.shape[2] - self.km] = ndarray
                ndarray = padded
        elif ndarray.ndim == 2:
            # Broadcast a 2D field to km identical vertical levels.
            ndarray = np.repeat(ndarray[:, :, np.newaxis], self.km, axis=2)
        else:
            origin = (0,)
        return gt_storage.from_array(
            ndarray, backend, default_origin=origin, shape=ndarray.shape
        )
    if var.name in ["q_con", "cappa"]:
        # These scalar variables are expanded to zero-filled 3D fields.
        zeros3d = np.zeros((self.full_domain_nx, self.full_domain_ny, self.km))
        return gt_storage.from_array(
            zeros3d,
            backend,
            default_origin=(self.ng, self.ng, 0),
            shape=zeros3d.shape,
        )
    return var[0].item()
def storage_from_array(self, array):
    """Wrap *array* in a managed-memory gt4py storage with a HALO-offset origin."""
    return storage.from_array(
        array,
        shape=array.shape,
        backend=self.gt4py_backend,
        default_origin=(HALO, HALO, 0),
        mask=(True, True, True),
        managed_memory=True,
    )
def numpy_to_gt4py_storage_2D(arr, backend, k_depth):
    """Convert a 2D numpy array (i, k) to a gt4py storage shaped (i, 1, k).

    Boolean inputs are converted to int32, and the k axis is zero-padded so
    it is at least *k_depth* long.
    """
    data = arr.reshape(arr.shape[0], 1, arr.shape[1])
    if data.dtype == "bool":
        data = data.astype(np.int32)
    # Enforce a minimum length of k_depth in the "k" direction.
    missing = k_depth - arr.shape[1]
    if missing > 0:
        data = np.dstack((data, np.zeros((arr.shape[0], 1, missing))))
    return gt_storage.from_array(data, backend=backend, default_origin=(0, 0, 0))
def run_test_slices(backend):
    """Strided slices of a storage expose the same data as the source array."""
    shape = (10, 10, 10)
    source = np.random.randn(*shape)
    stor = gt_store.from_array(
        source,
        backend=backend,
        dtype=np.float64,
        default_origin=(1, 1, 1),
        shape=shape,
    )
    strided = stor[::2, ::2, ::2]
    assert (strided.view(np.ndarray) == source[::2, ::2, ::2]).all()
    # Writing through the sliced view must also work.
    strided[...] = source[::2, ::2, ::2]
def test_cuda_array_interface():
    """A GPU storage can be consumed directly by cupy (CUDA array interface)."""
    stor = gt_store.from_array(
        cp.random.randn(5, 5, 5),
        backend="gtcuda",
        dtype=np.float64,
        default_origin=(1, 1, 1),
        shape=(5, 5, 5),
    )
    round_tripped = cp.array(stor)
    assert (round_tripped == stor).all()
def test_transpose(backend="gtmc"):
    """An identity transpose keeps the stencil-view property; reversing the
    axes loses it."""
    array = np.random.randn(10, 10, 10)
    stor = gt_store.from_array(
        array, default_origin=(1, 1, 1), backend=backend, dtype=np.float64
    )

    identity = np.transpose(stor, axes=(0, 1, 2))
    assert identity.strides == stor.strides
    assert identity.is_stencil_view

    reversed_axes = np.transpose(stor, axes=(2, 1, 0))
    assert not reversed_axes.is_stencil_view
def init_fields(self, data, backend):
    """Draw random grid sizes from hypothesis and allocate all test fields."""
    self.nx = data.draw(hyp_st.integers(min_value=7, max_value=32), label="nx")
    self.ny = data.draw(hyp_st.integers(min_value=7, max_value=32), label="ny")
    self.nz = data.draw(hyp_st.integers(min_value=1, max_value=32), label="nz")
    shape = (self.nx, self.ny, self.nz)

    def draw_field(default_origin):
        # Each field gets an independent random array of the same shape.
        return gt_storage.from_array(
            data.draw(st_arrays(dtype=float, shape=shape)),
            backend=backend,
            default_origin=default_origin,
            dtype=float,
        )

    self.in_phi = draw_field((0, 0, 0))
    self.in_u = draw_field((0, 0, 0))
    self.in_v = draw_field((0, 0, 0))
    self.tmp_phi = draw_field((1, 1, 0))
    self.out_phi = draw_field((3, 3, 0))
    self.alpha = 1 / 32
def test_optional_arg_provide(backend):
    """A frozen stencil called from a dace.program accepts unused field/scalar args."""

    @gtscript.stencil(backend=backend)
    def stencil(
        inp: gtscript.Field[np.float64],
        unused_field: gtscript.Field[np.float64],
        outp: gtscript.Field[np.float64],
        unused_par: float,
    ):
        with computation(PARALLEL), interval(...):
            outp = inp  # noqa F841: local variable 'outp' is assigned to but never used

    frozen_stencil = stencil.freeze(
        domain=(3, 3, 10),
        origin={"inp": (2, 2, 0), "outp": (2, 2, 0), "unused_field": (0, 0, 0)},
    )

    inp = gt_storage.from_array(
        data=7.0,
        dtype=np.float64,
        shape=(10, 10, 10),
        default_origin=(0, 0, 0),
        backend=backend,
    )
    outp = gt_storage.zeros(
        dtype=np.float64, shape=(10, 10, 10), default_origin=(0, 0, 0), backend=backend
    )
    unused_field = gt_storage.zeros(
        dtype=np.float64, shape=(10, 10, 10), default_origin=(0, 0, 0), backend=backend
    )

    inp.host_to_device()
    outp.host_to_device()

    @dace.program(
        device=dace.DeviceType.GPU if "gpu" in backend else dace.DeviceType.CPU
    )
    def call_frozen_stencil():
        frozen_stencil(inp=inp, unused_field=unused_field, outp=outp, unused_par=7.0)

    call_frozen_stencil()
    outp.device_to_host(force=True)

    assert np.allclose(inp, 7.0)
    # Only the 3x3x10 box at origin (2, 2, 0) is written: 90 points of 7.0.
    assert np.allclose(np.asarray(outp)[2:5, 2:5, :], 7.0)
    assert np.sum(np.asarray(outp), axis=(0, 1, 2)) == 90 * 7.0
def test_auto_sync_storage():
    """With device_sync=False, a stencil run marks its storages as modified
    until an explicit device_to_host sync."""
    BACKEND = "gtcuda"

    @stencil(backend=BACKEND, device_sync=False)
    def swap_stencil(
        inp: Field[float],  # type: ignore
        out: Field[float],  # type: ignore
    ):
        with computation(PARALLEL), interval(...):
            tmp = inp
            inp = out
            out = tmp

    shape = (5, 5, 5)

    def make_managed(device_values):
        return gt_store.from_array(
            device_values,
            backend=BACKEND,
            dtype=np.float64,
            default_origin=(0, 0, 0),
            shape=shape,
            managed_memory=True,
        )

    q0 = make_managed(cp.zeros(shape))
    q1 = make_managed(cp.ones(shape))

    # Nothing has been modified yet.
    assert not gt_store.storage.GPUStorage.get_modified_storages()
    swap_stencil(q0, q1)
    # Both arguments are now pending a host sync.
    assert len(gt_store.storage.GPUStorage.get_modified_storages()) == 2
    q0.device_to_host()
    # After the sync, no storages are reported as modified.
    assert not gt_store.storage.GPUStorage.get_modified_storages()
def test_sum_gpu():
    """cp.sum over a storage slice can be assigned back into a storage slice."""
    shape = (5, 5, 5)
    q1 = gt_store.from_array(
        cp.zeros(shape),
        backend="gtcuda",
        dtype=np.float64,
        default_origin=(0, 0, 0),
        shape=shape,
    )
    q2 = gt_store.from_array(
        cp.ones(shape),
        backend="gtcuda",
        dtype=np.float64,
        default_origin=(0, 0, 0),
        shape=shape,
    )
    i_range = slice(3, 5)  # i1=3 .. i2=4 inclusive
    j_range = slice(3, 4, None)
    q1[i_range, j_range, 0] = cp.sum(q2[i_range, j_range, :], axis=2)
def test_numpy_patch():
    """prepare_numpy() patches np.asarray/np.array behavior for storages and
    restore_numpy() reverts it.

    Fixed: the patch is now undone in a ``finally`` block, so a failing
    assertion cannot leave numpy patched for subsequent tests (matching the
    try/finally discipline of the sibling ``test_asarray``).
    """
    storage = gt_store.from_array(
        np.random.randn(5, 5, 5), default_origin=(1, 1, 1), backend="gtmc"
    )

    class npsub(np.ndarray):
        pass

    numpy_array = np.ones((3, 3, 3))
    matrix = np.ones((3, 3)).view(npsub)

    # Default numpy semantics before patching.
    assert isinstance(np.asarray(storage), np.ndarray)
    assert isinstance(np.asarray(numpy_array), np.ndarray)
    assert isinstance(np.asarray(matrix), np.ndarray)
    assert isinstance(np.asanyarray(storage), type(storage))
    assert isinstance(np.asanyarray(numpy_array), np.ndarray)
    assert isinstance(np.asanyarray(matrix), npsub)
    assert isinstance(np.array(storage), np.ndarray)
    assert isinstance(np.array(matrix), np.ndarray)
    assert isinstance(np.array(numpy_array), np.ndarray)

    # apply numpy patch
    gt_store.prepare_numpy()
    try:
        assert isinstance(np.asarray(storage), type(storage))
        assert isinstance(np.asarray(numpy_array), np.ndarray)
        assert isinstance(np.asarray(matrix), np.ndarray)
        assert isinstance(np.array(matrix), np.ndarray)
        assert isinstance(np.array(numpy_array), np.ndarray)
        # np.array on a storage is expected to raise while patched.
        try:
            np.array(storage)
        except RuntimeError:
            pass
        else:
            raise AssertionError(
                "np.array(storage) must raise RuntimeError while patched"
            )
    finally:
        # undo patch — always, even on assertion failure
        gt_store.restore_numpy()

    assert isinstance(np.asarray(storage), np.ndarray)
    assert isinstance(np.asarray(numpy_array), np.ndarray)
    assert isinstance(np.asarray(matrix), np.ndarray)
    assert isinstance(np.array(storage), np.ndarray)
    assert isinstance(np.array(matrix), np.ndarray)
    assert isinstance(np.array(numpy_array), np.ndarray)
def test_deepcopy_gpu_unmanaged(method, backend="gtcuda"):
    """Copies of an unmanaged GPU storage duplicate host and device buffers.

    Args:
        method: ``"deepcopy"`` (via :mod:`copy`) or ``"copy_method"``
            (via ``Storage.copy()``).
        backend: GPU backend name used to allocate the storage.

    Fixed: the reversed-order branches of the overlap checks were vacuous
    (``a + len(a) >= b`` is always true when ``a > b``); both directions now
    assert real disjointness.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10),
        default_origin=(1, 1, 1),
        backend=backend,
        managed_memory=False,
    )

    if method == "deepcopy":
        stor_copy = copy.deepcopy(stor)
    elif method == "copy_method":
        stor_copy = stor.copy()
    else:
        raise ValueError(f"Test not implemented for copying using '{method}'")

    assert stor is not stor_copy
    assert stor._sync_state is not stor_copy._sync_state

    def _assert_disjoint(addr_a, size_a, addr_b, size_b):
        # Two buffers must occupy non-overlapping address ranges.
        assert addr_a != addr_b
        if addr_a < addr_b:
            assert addr_a + size_a <= addr_b
        else:
            assert addr_b + size_b <= addr_a

    # Host buffers.
    _assert_disjoint(
        stor._raw_buffer.ctypes.data,
        len(stor._raw_buffer),
        stor_copy._raw_buffer.ctypes.data,
        len(stor_copy._raw_buffer),
    )
    # Device buffers.
    _assert_disjoint(
        stor._device_raw_buffer.data.ptr,
        len(stor._device_raw_buffer),
        stor_copy._device_raw_buffer.data.ptr,
        len(stor_copy._device_raw_buffer),
    )

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
    assert (stor._device_field[...] == stor_copy._device_field[...]).all()
def compile_and_run_vertical_regions(backend, id_version):
    """Build, compile and run the vertical-regions test stencil.

    Returns:
        Tuple of (computed output storage, expected reference ndarray).
    """
    import gt4py.definitions as gt_defs
    from gt4py import ir as gt_ir
    from gt4py.definitions import StencilID

    iir = make_test_vertical_regions()
    options = gt_defs.BuildOptions(
        name="test_vertical_regions_stencil",
        module="_test_module." + "test_vertical_regions",
        rebuild=False,
    )
    stencil_id = StencilID("{}.{}".format(options.module, options.name), id_version)

    # Load the compiled stencil from cache, or generate it on a miss.
    stencil_class = backend.load(stencil_id, None, options)
    if stencil_class is None:
        stencil_class = backend.generate(stencil_id, iir, None, options)
    stencil_implementation = stencil_class()

    field_out = gt_store.from_array(
        np.zeros([d + 2 * o for d, o in zip((10, 10, 10), (0, 0, 0))]),
        default_origin=(0, 0, 0),
        shape=(10, 10, 10),
        dtype=np.float_,
        mask=[True, True, True],
        backend=backend.name,
    )

    # Expected: 1.0 at the bottom level, 2.0 in the interior, 3.0 at the top.
    field_out_ref = np.zeros([d + 2 * o for d, o in zip((10, 10, 10), (0, 0, 0))])
    field_out_ref[:, :, 0] = 1.0
    field_out_ref[:, :, 1:-1] = 2.0
    field_out_ref[:, :, -1:] = 3.0

    if hasattr(field_out, "host_to_device"):
        field_out.host_to_device()
    stencil_implementation.run(
        _origin_=dict(out=(0, 0, 0)),
        _domain_=(10, 10, 10),
        out=field_out,
        exec_info=None,
    )
    if hasattr(field_out, "device_to_host"):
        field_out.device_to_host(force=True)
    return (field_out, field_out_ref)
def test_asarray(self):
    """prepare_numpy() changes np.asarray/np.array behavior for storages only;
    restore_numpy() reverts it even when an assertion fails."""
    storage = gt_store.from_array(
        np.random.randn(5, 5, 5), default_origin=(1, 1, 1), backend="gtmc"
    )

    class NDArraySub(np.ndarray):
        pass

    numpy_array = np.ones((3, 3, 3))
    matrix = np.ones((3, 3)).view(NDArraySub)

    # Unpatched: asarray always yields a base ndarray, asanyarray preserves
    # subclasses (including storages).
    assert isinstance(np.asarray(storage), np.ndarray)
    assert isinstance(np.asarray(numpy_array), np.ndarray)
    assert isinstance(np.asarray(matrix), np.ndarray)
    assert isinstance(np.asanyarray(storage), type(storage))
    assert isinstance(np.asanyarray(numpy_array), np.ndarray)
    assert isinstance(np.asanyarray(matrix), NDArraySub)
    assert isinstance(np.array(storage), np.ndarray)
    assert isinstance(np.array(matrix), np.ndarray)
    assert isinstance(np.array(numpy_array), np.ndarray)

    # apply numpy patch
    gt_store.prepare_numpy()
    try:
        # Patched: asarray preserves the storage type; plain arrays and
        # subclasses behave as before.
        assert isinstance(np.asarray(storage), type(storage))
        assert isinstance(np.asarray(numpy_array), np.ndarray)
        assert isinstance(np.asarray(matrix), np.ndarray)
        assert isinstance(np.array(matrix), np.ndarray)
        assert isinstance(np.array(numpy_array), np.ndarray)
        # Copying a storage through np.array is forbidden while patched.
        with pytest.raises(RuntimeError):
            np.array(storage)
    finally:
        # undo patch
        gt_store.restore_numpy()

    # Reverted: default numpy behavior again.
    assert isinstance(np.asarray(storage), np.ndarray)
    assert isinstance(np.asarray(numpy_array), np.ndarray)
    assert isinstance(np.asarray(matrix), np.ndarray)
    assert isinstance(np.array(storage), np.ndarray)
    assert isinstance(np.array(matrix), np.ndarray)
    assert isinstance(np.array(numpy_array), np.ndarray)
def run_test_view(backend):
    """Exercise view() and the consistency checks on strided subviews."""
    stor = gt_store.from_array(
        np.random.randn(10, 10, 10), default_origin=(1, 1, 1), backend=backend
    )
    # Viewing a storage as its own type must succeed.
    stor.view(type(stor))

    # For backends whose layout map is not plain C order, viewing a plain
    # ndarray as the storage type must raise RuntimeError.
    layout = gt_backend.from_name(backend).storage_info["layout_map"]([True] * 3)
    if layout != (0, 1, 2):
        try:
            np.ones((10, 10, 10)).view(type(stor))
        except RuntimeError:
            pass
        else:
            raise Exception

    strided_view = stor[::2, ::2, ::2]
    assert not strided_view._is_consistent(stor)
    assert not strided_view.is_stencil_view
def test_deepcopy_cpu(backend):
    """copy.deepcopy of a CPU storage yields an independent, equal buffer.

    Fixed: the original ``else`` branch asserted ``a + len(a) >= b`` with
    ``a > b``, which is vacuously true and never checked disjointness in that
    direction; both directions now assert real non-overlap.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10), default_origin=(1, 1, 1), backend=backend
    )
    stor_copy = copy.deepcopy(stor)

    assert stor is not stor_copy

    addr_a = stor._raw_buffer.ctypes.data
    addr_b = stor_copy._raw_buffer.ctypes.data
    assert addr_a != addr_b
    # The raw buffers must occupy disjoint address ranges.
    if addr_a < addr_b:
        assert addr_a + len(stor._raw_buffer) <= addr_b
    else:
        assert addr_b + len(stor_copy._raw_buffer) <= addr_a

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
def test_deepcopy_gpu(backend="gtcuda"):
    """copy.deepcopy of a managed-memory GPU storage duplicates its buffer.

    Fixed: the original ``else`` branch asserted ``a + len(a) >= b`` with
    ``a > b``, which is vacuously true; both directions now assert real
    non-overlap of the device buffers.
    """
    import copy

    stor = gt_store.from_array(
        np.random.randn(10, 10, 10),
        default_origin=(1, 1, 1),
        backend=backend,
        managed_memory=True,
    )
    stor_copy = copy.deepcopy(stor)

    assert stor is not stor_copy

    ptr_a = stor._raw_buffer.data.ptr
    ptr_b = stor_copy._raw_buffer.data.ptr
    assert ptr_a != ptr_b
    # The device buffers must occupy disjoint address ranges.
    if ptr_a < ptr_b:
        assert ptr_a + len(stor._raw_buffer) <= ptr_b
    else:
        assert ptr_b + len(stor_copy._raw_buffer) <= ptr_a

    np.testing.assert_equal(stor_copy.view(np.ndarray), stor.view(np.ndarray))
def test_nondace_raises():
    """Calling a frozen stencil with a non-dace backend inside a dace.program
    must raise a TypeError."""

    @gtscript.stencil(backend="numpy")
    def numpy_stencil(inp: gtscript.Field[np.float64], outp: gtscript.Field[np.float64]):
        with computation(PARALLEL), interval(...):
            outp = inp  # noqa F841: local variable 'outp' is assigned to but never used

    frozen_stencil = numpy_stencil.freeze(
        domain=(3, 3, 3), origin={"inp": (0, 0, 0), "outp": (0, 0, 0)}
    )

    inp = gt_storage.from_array(
        data=7.0,
        dtype=np.float64,
        shape=(10, 10, 10),
        default_origin=(0, 0, 0),
        backend="numpy",
    )
    outp = gt_storage.zeros(
        dtype=np.float64,
        shape=(10, 10, 10),
        default_origin=(0, 0, 0),
        backend="numpy",
    )

    @dace.program
    def call_frozen_stencil():
        frozen_stencil(inp=inp, outp=outp)

    expected_message = (
        "Only dace backends are supported in DaCe-orchestrated programs."
        ' (found "numpy")'
    )
    with pytest.raises(TypeError, match=re.escape(expected_message)):
        call_frozen_stencil()
def test_implementation(self, test, parameters_dict):
    """Test computed values for implementations generated for all *backends* and *stencil suites*.

    The generated implementations are reused from previous tests by means of
    a :class:`utils.ImplementationsDB` instance shared at module scope.
    """
    # backend = "debug"
    cls = type(self)
    implementation_list = test["implementations"]
    if not implementation_list:
        pytest.skip(
            "Cannot perform validation tests, since there are no valid implementations."
        )
    for implementation in implementation_list:
        if not isinstance(implementation, StencilObject):
            raise RuntimeError(
                "Wrong function got from implementations_db cache!")
        fields, exec_info = parameters_dict
        # Domain
        from gt4py.definitions import Shape
        from gt4py.ir.nodes import Index
        origin = cls.origin
        # Collect the shapes of all ndarray arguments (scalar parameters
        # are skipped).
        shapes = {}
        for name, field in [(k, v) for k, v in fields.items()
                            if isinstance(v, np.ndarray)]:
            shapes[name] = Shape(field.shape)
        # Largest domain every field can accommodate given the origin and
        # its declared upper boundary.
        max_domain = Shape([sys.maxsize] * implementation.domain_info.ndims)
        for name, shape in shapes.items():
            upper_boundary = Index(
                [b[1] for b in cls.symbols[name].boundary])
            max_domain &= shape - (Index(origin) + upper_boundary)
        domain = max_domain
        # Widest boundary required by any field of this implementation.
        max_boundary = ((0, 0), (0, 0), (0, 0))
        for name, info in implementation.field_info.items():
            if isinstance(info, gt_definitions.FieldInfo):
                max_boundary = tuple(
                    (max(m[0], abs(b[0])), max(m[1], b[1]))
                    for m, b in zip(max_boundary, info.boundary))
        # Combine the suite's declared boundary with the implementation's.
        new_boundary = tuple(
            (max(abs(b[0]), abs(mb[0])), max(abs(b[1]), abs(mb[1])))
            for b, mb in zip(cls.max_boundary, max_boundary))
        # All ndarray fields are expected to share a single shape.
        shape = None
        for name, field in fields.items():
            if isinstance(field, np.ndarray):
                assert field.shape == (shape if shape is not None else field.shape)
                shape = field.shape
        # Allocate enlarged ("patched") buffers and copy the original data
        # into the matching interior slice.
        patched_origin = tuple(nb[0] for nb in new_boundary)
        patching_origin = tuple(po - o for po, o in zip(patched_origin, origin))
        patched_shape = tuple(nb[0] + nb[1] + d for nb, d in zip(new_boundary, domain))
        patching_slices = [
            slice(po, po + s) for po, s in zip(patching_origin, shape)
        ]
        # Publish the stencil's compile-time constants into this test module
        # so the validation function can reference them.
        for k, v in implementation.constants.items():
            sys.modules[self.__module__].__dict__[k] = v
        # Wrap ndarray arguments in gt4py storages over the patched buffers;
        # non-array parameters are passed through unchanged.
        inputs = {}
        for k, f in fields.items():
            if isinstance(f, np.ndarray):
                patched_f = np.empty(shape=patched_shape)
                patched_f[patching_slices] = f
                inputs[k] = gt_storage.from_array(
                    patched_f,
                    dtype=test["definition"].__annotations__[k],
                    shape=patched_f.shape,
                    default_origin=patched_origin,
                    backend=test["backend"],
                )
            else:
                inputs[k] = f
        # Keep pristine copies to run the reference validation on.
        validation_fields = {
            name: np.array(field, copy=True) for name, field in inputs.items()
        }
        implementation(**inputs, origin=patched_origin, exec_info=exec_info)
        domain = exec_info["domain"]
        # Per-field origins/shapes for the views handed to the validation
        # function, derived from the combined boundaries.
        validation_origins = {
            name: tuple(
                nb[0] - g[0]
                for nb, g in zip(new_boundary, cls.symbols[name].boundary))
            for name in implementation.field_info.keys()
        }
        validation_shapes = {
            name: tuple(d + g[0] + g[1]
                        for d, g in zip(domain, cls.symbols[name].boundary))
            for name in implementation.field_info.keys()
        }
        validation_field_views = {
            name: field[tuple(
                slice(o, o + s)
                for o, s in zip(validation_origins[name], validation_shapes[name]))]
            if name in implementation.field_info
            else field  # parameters
            for name, field in validation_fields.items()
        }
        cls.validation(
            **validation_field_views,
            domain=domain,
            origin={
                name: tuple(b[0] for b in cls.symbols[name].boundary)
                for name in validation_fields
                if name in implementation.field_info
            },
        )
        # Test values
        for (name, value), (expected_name, expected_value) in zip(
                inputs.items(), validation_fields.items()):
            if isinstance(fields[name], np.ndarray):
                # Compare only the interior (domain) region of each field.
                domain_slice = [
                    slice(new_boundary[d][0], new_boundary[d][0] + domain[d])
                    for d in range(len(domain))
                ]
                np.testing.assert_allclose(
                    value.data[domain_slice],
                    expected_value[domain_slice],
                    rtol=RTOL,
                    atol=ATOL,
                    equal_nan=EQUAL_NAN,
                    err_msg="Wrong data in output field '{name}'".format(
                        name=name),
                )
def test_implementation(self, test, parameters_dict):
    """Test computed values for implementations generated for all *backends* and *stencil suites*.

    The generated implementations are reused from previous tests by means of
    a :class:`utils.ImplementationsDB` instance shared at module scope.
    """
    # backend = "debug"
    cls = type(self)
    implementation_list = test["implementations"]
    if not implementation_list:
        pytest.skip(
            "Cannot perform validation tests, since there are no valid implementations."
        )
    for implementation in implementation_list:
        if not isinstance(implementation, StencilObject):
            raise RuntimeError("Wrong function got from implementations_db cache!")
        fields, exec_info = parameters_dict
        # Publish the stencil's compile-time constants into this test module
        # so the validation function can reference them.
        for k, v in implementation._gt_constants_.items():
            sys.modules[self.__module__].__dict__[k] = v
        # Wrap ndarray arguments in gt4py storages; non-array parameters are
        # passed through unchanged.
        inputs = {}
        for k, f in fields.items():
            if isinstance(f, np.ndarray):
                inputs[k] = gt_storage.from_array(
                    f,
                    dtype=test["definition"].__annotations__[k],
                    shape=f.shape,
                    default_origin=cls.origin,
                    backend=test["backend"],
                )
            else:
                inputs[k] = f
        # Pristine copies for the reference (validation) computation.
        validation_fields = {name: np.array(field) for name, field in fields.items()}
        implementation(**inputs, origin=cls.origin, exec_info=exec_info)
        domain = exec_info["domain"]
        # Per-field view origins/shapes derived from the implementation's
        # boundary requirements.
        validation_origins = {
            name: tuple(
                b[0] - g[0]
                for b, g in zip(cls.max_boundary, implementation.field_info[name].boundary)
            )
            for name in fields.keys()
            if name in implementation.field_info
        }
        validation_shapes = {
            name: tuple(
                d + g[0] + g[1]
                for d, g in zip(domain, implementation.field_info[name].boundary)
            )
            for name, field in fields.items()
            if name in implementation.field_info
        }
        validation_field_views = {
            name: field[
                tuple(
                    slice(o, o + s)
                    for o, s in zip(validation_origins[name], validation_shapes[name])
                )
            ]
            if name in implementation.field_info
            else field  # parameters
            for name, field in validation_fields.items()
        }
        cls.validation(
            **validation_field_views,
            domain=domain,
            origin={
                name: implementation.field_info[name].boundary.lower_indices
                for name in validation_fields
                if name in implementation.field_info
            },
        )
        # Test values
        for (name, value), (expected_name, expected_value) in zip(
            inputs.items(), validation_fields.items()
        ):
            if isinstance(fields[name], np.ndarray):
                np.testing.assert_allclose(
                    value.data,
                    expected_value,
                    rtol=RTOL,
                    atol=ATOL,
                    equal_nan=EQUAL_NAN,
                    err_msg="Wrong data in output field '{name}'".format(name=name),
                )