Example #1
File: test_cli.py  Project: havogt/gt4py
def test_write_computation_src(tmp_path, simple_stencil):
    builder = cli.GTScriptBuilder(simple_stencil,
                                  output_path=tmp_path,
                                  backend=backend.from_name("debug"))
    toplevel = "test_write_computation_src"
    test_src = {
        toplevel: {
            "include": {
                "header.hpp": "#pragma once"
            },
            "src": {
                "main.cpp": "#include <header.hpp>"
            },
        }
    }
    builder.write_computation_src(tmp_path, test_src)
    top = tmp_path / toplevel
    inc = top / "include"
    header = inc / "header.hpp"
    src = top / "src"
    main = src / "main.cpp"
    assert top.exists() and top.is_dir()
    assert inc.exists() and inc.is_dir()
    assert src.exists() and src.is_dir()
    assert header.exists()
    assert header.read_text() == test_src[toplevel]["include"]["header.hpp"]
    assert main.exists()
    assert main.read_text() == test_src[toplevel]["src"]["main.cpp"]
Example #2
File: cli.py  Project: stubbiali/gt4py
def enabled_backend_cls_from_name(
        backend_name: str) -> Optional[Type[CLIBackendMixin]]:
    """Check if a given backend is enabled for CLI."""
    backend_cls = gt_backend.from_name(backend_name)
    if not issubclass(backend_cls, CLIBackendMixin):
        return None
    return backend_cls
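Example #2 above resolves a backend name to a class with from_name and then gates CLI support with issubclass; the load_stencil examples further down additionally check for a None result on unknown names. Below is a minimal, self-contained sketch of that lookup pattern. The registry, the backend classes, and their names are hypothetical stand-ins, not gt4py's actual registry.

from typing import Optional, Type


class Backend:  # hypothetical stand-in for gt_backend.Backend
    name = ""


class CLIBackendMixin:  # hypothetical stand-in for the real CLI mixin
    pass


class DebugBackend(Backend, CLIBackendMixin):
    name = "debug"


class InternalBackend(Backend):  # registered, but not CLI-enabled
    name = "internal"


REGISTRY = {cls.name: cls for cls in (DebugBackend, InternalBackend)}


def from_name(name: str) -> Optional[Type[Backend]]:
    # Mirrors the behaviour the examples rely on: the registered class, or None.
    return REGISTRY.get(name)


def enabled_backend_cls_from_name(backend_name: str) -> Optional[Type[CLIBackendMixin]]:
    # Unlike example #2, this sketch also guards the unknown-name (None) case.
    backend_cls = from_name(backend_name)
    if backend_cls is None or not issubclass(backend_cls, CLIBackendMixin):
        return None
    return backend_cls


assert enabled_backend_cls_from_name("debug") is DebugBackend
assert enabled_backend_cls_from_name("internal") is None
assert enabled_backend_cls_from_name("no-such-backend") is None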
Example #3
def test_invalid_temporary_access(definition):
    builder = StencilBuilder(definition, backend=from_name("gtc:numpy"))
    with pytest.raises(
            TypeError,
            match="Invalid access with offset in k to temporary field tmp."):
        k_boundary = compute_k_boundary(
            builder.gtir_pipeline.full(skip=[prune_unused_parameters]))
Example #4
def load_stencil(
    frontend_name: str,
    backend_name: str,
    definition_func: StencilFunc,
    externals: Dict[str, Any],
    build_options: "BuildOptions",
) -> Type["StencilObject"]:
    """Generate a new class object implementing the provided definition."""
    # Load components
    backend_cls = gt_backend.from_name(backend_name)
    if backend_cls is None:
        raise ValueError(
            "Unknown backend name ({name})".format(name=backend_name))

    frontend = gt_frontend.from_name(frontend_name)
    if frontend is None:
        raise ValueError("Invalid frontend specification ({name})".format(
            name=frontend_name))

    builder = StencilBuilder(definition_func,
                             options=build_options,
                             backend=backend_cls,
                             frontend=frontend).with_externals(externals)

    return builder.build()
Example #5
def build_def_ir_stencil(name,
                         options,
                         backend="gtx86",
                         *,
                         id_version="xxxxxx"):
    if isinstance(backend, str):
        backend = gt_backend.from_name(backend)
    if not issubclass(backend, gt_backend.Backend):
        raise TypeError(
            "Backend must be a backend identifier string or a gt4py Backend class."
        )

    def_ir_factory = REGISTRY[name]
    def_ir = def_ir_factory()
    iir = gt_analysis.transform(def_ir, options)
    import tests.test_iir.iir_stencil_definitions as iir_stencil_definitions

    ref_iir = iir_stencil_definitions.make_vertical_advection_dycore()
    stencil_id = StencilID("{}.{}".format(options.module, options.name),
                           id_version)

    if options.rebuild:
        # Force recompilation
        stencil_class = None
    else:
        # Use cached version (if id_version matches)
        stencil_class = backend.load(stencil_id, None, options)

    if stencil_class is None:
        stencil_class = backend.generate(stencil_id, iir, None, options)

    stencil_implementation = stencil_class()

    return stencil_implementation
Example #6
    def test_start_offset(self, backend):
        backend_cls = gt_backend.from_name(backend)
        default_origin = (1, 2, 3)
        stor = gt_store.ones(
            backend=backend,
            managed_memory=False,
            shape=(3, 7, 13),
            default_origin=default_origin,
            dtype=np.float64,
        )
        descriptor: dace.data.Array = stor.__descriptor__()
        raveled = TestDescriptor.ravel_with_padding(stor)[descriptor.start_offset :]
        if backend_cls.storage_info["device"] == "gpu":
            assert raveled.data.ptr % (backend_cls.storage_info["alignment"] * stor.itemsize) == 0
            assert (
                backend_cls.storage_info["alignment"] == 1
                or cp.asarray(stor).data.ptr
                % (backend_cls.storage_info["alignment"] * stor.itemsize)
                != 0
            )
        else:
            assert (
                raveled.ctypes.data % (backend_cls.storage_info["alignment"] * stor.itemsize) == 0
            )
            assert (
                backend_cls.storage_info["alignment"] == 1
                or stor.ctypes.data % (backend_cls.storage_info["alignment"] * stor.itemsize) != 0
            )
Example #7
    def subtest_exec_info(self, exec_info):
        assert "call_start_time" in exec_info
        assert "call_end_time" in exec_info
        assert exec_info["call_end_time"] > exec_info["call_start_time"]

        assert "run_start_time" in exec_info
        assert exec_info["run_start_time"] > exec_info["call_start_time"]
        assert "run_end_time" in exec_info
        assert exec_info["run_end_time"] > exec_info["run_start_time"]
        assert exec_info["call_end_time"] > exec_info["run_end_time"]

        if gt_backend.from_name(self.backend).languages["computation"] == "c++":
            assert "run_cpp_start_time" in exec_info
            assert "run_cpp_end_time" in exec_info
            # note: do not compare the outputs of python and c++ stopwatches
            # consider only deltas

        assert "origin" in exec_info
        assert exec_info["origin"] == {
            "_all_": (3, 3, 0),
            "in_phi": (3, 3, 0),
            "out_phi": (3, 3, 0),
        }

        assert "domain" in exec_info
        assert exec_info["domain"] == (self.nx - 6, self.ny - 6, self.nz)
Example #8
def test_k_bounds(definition, expected_k_bounds):
    builder = StencilBuilder(definition, backend=from_name("debug"))
    k_boundary = compute_k_boundary(builder.gtir_pipeline.full(skip=[prune_unused_parameters]))[
        "field_b"
    ]

    assert expected_k_bounds == k_boundary
Example #9
File: suites.py  Project: egparedes/gt4py
    def parametrize_implementation_tests(cls_name, bases, cls_dict):
        implementation_strategy_factories = cls_dict[
            "implementation_strategy_factories"]
        global_boundaries = cls_dict["global_boundaries"]

        def implementation_test_wrapper(self, test, implementation_strategy):
            @hyp.given(hypothesis_data=implementation_strategy())
            def hyp_wrapper(test_hyp, hypothesis_data):
                bases[0]._test_implementation(self, test_hyp, hypothesis_data)

            hyp_wrapper(test)

        runtime_pytest_params = []
        for test in cls_dict["tests"]:
            if test["suite"] == cls_name:
                marks = ([pytest.mark.requires_gpu] if gt_backend.from_name(
                    test["backend"]).storage_info["device"] == "gpu" else ())
                name = test["backend"]
                name += "".join(f"_{key}_{value}"
                                for key, value in test["constants"].items())
                name += "".join("_{}_{}".format(key, value.name)
                                for key, value in test["dtypes"].items())
                runtime_pytest_params.append(
                    pytest.param(
                        test,
                        composite_implementation_strategy_factory(
                            test["dtypes"], implementation_strategy_factories,
                            global_boundaries),
                        marks=marks,
                        id=name,
                    ))

        cls_dict["test_implementation"] = pytest.mark.parametrize(
            ("test", "implementation_strategy"),
            runtime_pytest_params)(implementation_test_wrapper)
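Example #9 above builds its pytest parameter list dynamically and attaches a GPU marker depending on the backend's device. Here is a small sketch of the same pytest.param/marks pattern, with a hypothetical backend-to-device table standing in for gt_backend.from_name(...).storage_info.

import pytest

DEVICE = {"numpy": "cpu", "gtcuda": "gpu"}  # hypothetical backend -> device table

params = [
    pytest.param(
        backend,
        marks=[pytest.mark.requires_gpu] if DEVICE[backend] == "gpu" else [],
        id=backend,
    )
    for backend in DEVICE
]


@pytest.mark.parametrize("backend", params)
def test_backend_has_known_device(backend):
    assert DEVICE[backend] in ("cpu", "gpu")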
Example #10
File: loader.py  Project: jdahm/gt4py
def load_stencil(frontend_name, backend_name, definition_func, externals, options):
    """Generate a new class object implementing the provided definition.
    """

    # Load components
    backend = gt_backend.from_name(backend_name)
    if backend is None:
        raise ValueError("Unknown backend name ({name})".format(name=backend_name))

    frontend = gt_frontend.from_name(frontend_name)
    if frontend is None:
        raise ValueError("Invalid frontend specification ({name})".format(name=frontend_name))

    # Create ID
    options_id = backend.get_options_id(options)
    stencil_id = frontend.get_stencil_id(
        options.qualified_name, definition_func, externals, options_id
    )

    # Load or generate class
    stencil_class = None if options.rebuild else backend.load(stencil_id, definition_func, options)
    if stencil_class is None:
        definition_ir = frontend.generate(definition_func, externals, options)
        implementation_ir = gt_analysis.transform(definition_ir, options)
        stencil_class = backend.build(stencil_id, implementation_ir, definition_func, options)

    return stencil_class
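Both load_stencil variants (examples #4 and #10) share a load-or-build flow: options.rebuild forces regeneration, and a None result from the backend's cache lookup triggers a fresh build. A minimal sketch of that control flow, with hypothetical load/build stand-ins in place of backend.load and backend.build:

_CACHE = {}


def load(stencil_id):
    # Stand-in for backend.load: return a cached object or None.
    return _CACHE.get(stencil_id)


def build(stencil_id):
    # Stand-in for backend.build: create the object and cache it.
    obj = f"stencil<{stencil_id}>"
    _CACHE[stencil_id] = obj
    return obj


def load_or_build(stencil_id, rebuild=False):
    stencil_class = None if rebuild else load(stencil_id)
    if stencil_class is None:
        stencil_class = build(stencil_id)
    return stencil_class


first = load_or_build("copy_stencil")                 # built
cached = load_or_build("copy_stencil")                # served from the cache
forced = load_or_build("copy_stencil", rebuild=True)  # rebuilt despite the cache
assert first == cached == forced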
Example #11
    def __new__(cls, shape, dtype, backend, default_origin, mask=None):
        """

        Parameters
        ----------

        shape: tuple of ints
            the shape of the storage

        dtype: data type compatible with numpy dtypes
            supported are the floating point and integer dtypes of numpy

        backend: string, backend identifier
            Currently possible: debug, numpy, gtx86, gtmc, gtcuda

        default_origin: tuple of ints
            determines the point to which the storage memory address is aligned.
            for performance, this should be the coordinates of the most common origin
            at call time.
            when calling a stencil and no origin is specified, the default_origin is used.

        mask: list of booleans
            False entries indicate that the corresponding dimension is masked, i.e. the storage
            has reduced dimension and reading and writing from offsets along this axis access the same element.
        """

        if mask is None:
            mask = [True] * len(shape)
        default_origin = storage_utils.normalize_default_origin(
            default_origin, mask)
        shape = storage_utils.normalize_shape(shape, mask)

        if backend not in gt_backend.REGISTRY:
            raise ValueError("Backend must be in {}.".format(gt_backend.REGISTRY))

        alignment = gt_backend.from_name(backend).storage_info["alignment"]
        layout_map = gt_backend.from_name(backend).storage_info["layout_map"](
            mask)

        obj = cls._construct(backend, np.dtype(dtype), default_origin, shape,
                             alignment, layout_map)
        obj._backend = backend
        obj.is_stencil_view = True
        obj._mask = mask
        obj._check_data()

        return obj
Example #12
File: suites.py  Project: stubbiali/gt4py
    def __new__(cls, cls_name, bases, cls_dict):
        if cls_dict.get("_skip_", False):  # skip metaclass magic
            return super().__new__(cls, cls_name, bases, cls_dict)
        # Grab members inherited from base classes

        missing_members = cls.required_members - cls_dict.keys()

        for key in missing_members:
            for base in bases:
                if hasattr(base, key):
                    cls_dict[key] = getattr(base, key)
                    break

        dtypes = cls_dict["dtypes"]
        if isinstance(dtypes, collections.abc.Sequence):
            dtypes = {tuple(cls_dict["symbols"].keys()): dtypes}
        cls_dict["dtypes"] = standardize_dtype_dict(dtypes)
        cls_dict["ndims"] = len(cls_dict["domain_range"])

        # Filter out unsupported backends
        cls_dict["backends"] = [
            backend for backend in cls_dict["backends"]
            if gt_backend.from_name(backend if isinstance(backend, str) else
                                    backend.values[0]) is not None
        ]

        cls._validate_new_args(cls_name, cls_dict)

        # Extract input and parameter names
        input_names = []
        parameter_names = []
        definition_signature = inspect.signature(cls_dict["definition"])
        validation_signature = inspect.signature(cls_dict["validation"])
        for (def_name, def_pobj), (val_name, val_pobj) in zip(
                definition_signature.parameters.items(),
                validation_signature.parameters.items()):
            if def_name != val_name or def_pobj.kind != val_pobj.kind:
                raise ValueError(
                    "Incompatible signatures for 'definition' and 'validation' functions"
                )

            if def_pobj.kind == inspect.Parameter.KEYWORD_ONLY:
                parameter_names.append(def_name)
                if def_pobj.default != inspect.Parameter.empty:
                    assert def_pobj.default == val_pobj.default
            else:
                input_names.append(def_name)

        cls.collect_symbols(cls_name, cls_dict)

        assert set(input_names + parameter_names) == set(
            cls_dict["implementation_strategy_factories"].keys()
        ), "Missing or invalid keys in 'symbols' mapping (generated: {})".format(
            cls_dict["implementation_strategy_factories"].keys())

        cls.parametrize_generation_tests(cls_name, cls_dict)
        cls.parametrize_implementation_tests(cls_name, cls_dict)

        return super().__new__(cls, cls_name, bases, cls_dict)
Example #13
def test_daa(definition, valid):
    builder = StencilBuilder(definition, backend=from_name("debug"))
    gtir_stencil_expr = builder.gtir_pipeline.full()
    invalid_accesses = daa.analyze(gtir_stencil_expr)
    if valid:
        assert len(invalid_accesses) == 0
    else:
        assert len(invalid_accesses) == 1 and invalid_accesses[0].name == "tmp"
Example #14
    def __new__(cls, shape, dtype, backend, default_origin, mask=None):
        """
        Parameters
        ----------

        shape: tuple of ints
            the shape of the storage

        dtype: data type compatible with numpy dtypes
            supported are the floating point and integer dtypes of numpy

        backend: string, backend identifier
            Currently possible: debug, numpy, gtx86, gtmc, gtcuda

        default_origin: tuple of ints
            determines the point to which the storage memory address is aligned.
            for performance, this should be the coordinates of the most common origin
            at call time.
            when calling a stencil and no origin is specified, the default_origin is used.

        mask: list of bools or list of spatial axes
            In a list of bools, ``False`` entries indicate that the corresponding dimension is masked, i.e. the storage
            has reduced dimension and reading and writing from offsets along this axis access the same element.
            In a list of spatial axes (IJK), a boolean mask will be generated with ``True`` entries for all
            dimensions except for the missing spatial axes names.
        """

        default_origin, shape, dtype, mask = storage_utils.normalize_storage_spec(
            default_origin, shape, dtype, mask)

        _error_on_invalid_backend(backend)

        alignment = gt_backend.from_name(backend).storage_info["alignment"]
        layout_map = gt_backend.from_name(backend).storage_info["layout_map"](
            mask)

        obj = cls._construct(backend, np.dtype(dtype), default_origin, shape,
                             alignment, layout_map)
        obj._backend = backend
        obj.is_stencil_view = True
        obj._mask = mask
        obj._check_data()

        return obj
Example #15
    def subtest_stencil_info(self,
                             exec_info,
                             stencil_info,
                             last_called_stencil=False):
        assert "ncalls" in stencil_info
        assert stencil_info["ncalls"] == self.nt

        assert "call_start_time" in stencil_info
        assert "call_end_time" in stencil_info
        assert stencil_info["call_end_time"] > stencil_info["call_start_time"]
        assert "call_time" in stencil_info
        assert "total_call_time" in stencil_info
        assert np.isclose(
            stencil_info["call_time"],
            stencil_info["call_end_time"] - stencil_info["call_start_time"],
        )
        if self.nt == 1:
            assert stencil_info["total_call_time"] == stencil_info["call_time"]
        else:
            assert stencil_info["total_call_time"] > stencil_info["call_time"]
        if last_called_stencil:
            assert stencil_info["call_start_time"] == exec_info[
                "call_start_time"]
            assert stencil_info["call_end_time"] == exec_info["call_end_time"]

        assert "run_time" in stencil_info
        if last_called_stencil:
            assert np.isclose(
                stencil_info["run_time"],
                exec_info["run_end_time"] - exec_info["run_start_time"],
            )
        assert stencil_info["call_time"] > stencil_info["run_time"]
        assert "total_run_time" in stencil_info
        if self.nt == 1:
            assert stencil_info["total_run_time"] == stencil_info["run_time"]
        else:
            assert stencil_info["total_run_time"] > stencil_info["run_time"]

        if gt_backend.from_name(
                self.backend).languages["computation"] == "c++":
            assert "run_cpp_time" in stencil_info
            if last_called_stencil:
                assert np.isclose(
                    stencil_info["run_cpp_time"],
                    exec_info["run_cpp_end_time"] -
                    exec_info["run_cpp_start_time"],
                )
            assert stencil_info["run_time"] > stencil_info["run_cpp_time"]
            assert "total_run_cpp_time" in stencil_info
            if self.nt == 1:
                assert stencil_info["total_run_cpp_time"] == stencil_info[
                    "run_cpp_time"]
            else:
                assert stencil_info["total_run_cpp_time"] > stencil_info[
                    "run_cpp_time"]
Example #16
    def _sdfg_add_arrays_and_edges(self, wrapper_sdfg, state, inner_sdfg,
                                   nsdfg, inputs, outputs, origins):
        device = gt_backend.from_name(self.backend).storage_info["device"]

        for name, array in inner_sdfg.arrays.items():
            if isinstance(array, dace.data.Array) and not array.transient:
                axes = self.field_info[name].axes

                shape = [f"__{name}_{axis}_size" for axis in axes
                         ] + [str(d) for d in self.field_info[name].data_dims]

                wrapper_sdfg.add_array(
                    name,
                    dtype=array.dtype,
                    strides=array.strides,
                    shape=shape,
                    storage=dace.StorageType.GPU_Global
                    if device == "gpu" else dace.StorageType.Default,
                )
                if isinstance(origins, tuple):
                    origin = [o for a, o in zip("IJK", origins) if a in axes]
                else:
                    origin = origins.get(name, origins.get("_all_", None))
                    if len(origin) == 3:
                        origin = [
                            o for a, o in zip("IJK", origin) if a in axes
                        ]

                subset_strs = [
                    f"{o - e}:{o - e + s}" for o, e, s in zip(
                        origin,
                        self.field_info[name].boundary.lower_indices,
                        inner_sdfg.arrays[name].shape,
                    )
                ]
                subset_strs += [
                    f"0:{d}" for d in self.field_info[name].data_dims
                ]
                if name in inputs:
                    state.add_edge(
                        state.add_read(name),
                        None,
                        nsdfg,
                        name,
                        dace.Memlet.simple(name, ",".join(subset_strs)),
                    )
                if name in outputs:
                    state.add_edge(
                        nsdfg,
                        name,
                        state.add_write(name),
                        None,
                        dace.Memlet.simple(name, ",".join(subset_strs)),
                    )
Example #17
def test_j():
    def stencil(field_a: gs.Field[float], field_b: gs.Field[float, gs.J]):
        with computation(PARALLEL), interval(...):
            field_a = field_b[1] + field_b[-2]

    builder = StencilBuilder(stencil, backend=from_name("debug"))
    old_ext = builder.implementation_ir.fields_extents
    legacy_ext = compute_legacy_extents(prepare_gtir(builder))

    for name, ext in old_ext.items():
        assert legacy_ext[name] == ext
Example #18
def test_single_k_offset():
    def stencil(field_a: gs.Field[float], field_b: gs.Field[float]):
        with computation(PARALLEL), interval(...):
            field_a = field_b[0, 0, 1]

    builder = StencilBuilder(stencil, backend=from_name("debug"))
    old_ext = builder.implementation_ir.fields_extents
    legacy_ext = compute_legacy_extents(prepare_gtir(builder),
                                        mask_inwards=True)

    for name, ext in old_ext.items():
        assert legacy_ext[name] == ext
Example #19
def empty(backend, default_origin, shape, dtype, mask=None, *, managed_memory=False):
    if gt_backend.from_name(backend).storage_info["device"] == "gpu":
        if managed_memory:
            storage_t = GPUStorage
        else:
            storage_t = ExplicitlySyncedGPUStorage
    else:
        storage_t = CPUStorage

    return storage_t(
        shape=shape, dtype=dtype, backend=backend, default_origin=default_origin, mask=mask
    )
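Example #19 selects the storage class from storage_info["device"]. The sketch below shows the same dispatch with mocked backend metadata; the storage classes and the device table are assumptions for illustration, only the branching mirrors the example.

MOCK_STORAGE_INFO = {
    "numpy": {"device": "cpu"},
    "gtcuda": {"device": "gpu"},
}


class CPUStorage:
    pass


class GPUStorage:
    pass


class ExplicitlySyncedGPUStorage:
    pass


def pick_storage_class(backend, managed_memory=False):
    # Same branching as empty() above: GPU backends get a GPU storage class,
    # and managed_memory selects the CUDA-managed variant.
    if MOCK_STORAGE_INFO[backend]["device"] == "gpu":
        return GPUStorage if managed_memory else ExplicitlySyncedGPUStorage
    return CPUStorage


assert pick_storage_class("numpy") is CPUStorage
assert pick_storage_class("gtcuda") is ExplicitlySyncedGPUStorage
assert pick_storage_class("gtcuda", managed_memory=True) is GPUStorage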
Example #20
def test_generate_post_run(backend_name, mode):
    backend_cls = backend_registry[backend_name]
    builder = StencilBuilder(stencil_def, backend=backend_cls).with_externals(
        {"MODE": mode})
    args_data = make_args_data_from_gtir(builder.gtir_pipeline)

    module_generator = backend_cls.MODULE_GENERATOR_CLASS()
    module_generator.args_data = args_data
    source = module_generator.generate_post_run()

    if gt_backend.from_name(backend_name).storage_info["device"] == "cpu":
        assert source == ""
    else:
        assert source == "out._set_device_modified()"
Example #21
    def test_device(self, backend):
        backend_cls = gt_backend.from_name(backend)
        stor = gt_store.ones(
            backend=backend,
            managed_memory=False,
            shape=(3, 7, 13),
            default_origin=(1, 2, 3),
            dtype=np.float64,
        )
        descriptor: dace.data.Array = stor.__descriptor__()
        if backend_cls.storage_info["device"] == "gpu":
            assert descriptor.storage == dace.StorageType.GPU_Global
        else:
            assert descriptor.storage == dace.StorageType.CPU_Heap
Example #22
File: cli.py  Project: stubbiali/gt4py
    def convert(self, value: str, param: Optional[click.Parameter],
                ctx: Optional[click.Context]) -> Tuple[str, Any]:
        backend = ctx.params["backend"] if ctx else gt_backend.from_name("debug")
        name, value = self._try_split(value)
        if name.strip() not in backend.options:
            self.fail(f"Backend {backend.name} received unknown option: {name}!")
        try:
            value = self._convert_value(backend.options[name]["type"], value, param, ctx)
        except click.BadParameter as conversion_error:
            self.fail(
                f'Invalid value for backend option "{name}": {conversion_error.message}'  # noqa: B306
            )
        return (name, value)
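Example #22 turns "name=value" strings into typed backend options and rejects unknown names. Below is a click-free sketch of that split-and-convert step; the option table and the helper name are hypothetical and only illustrate the pattern (the real method delegates to self._try_split and self._convert_value).

OPTIONS = {"verbose": {"type": bool}, "opt_level": {"type": int}}  # hypothetical option table


def convert_option(value: str):
    name, _, raw = value.partition("=")
    name = name.strip()
    if name not in OPTIONS:
        raise ValueError(f"received unknown option: {name}!")
    target = OPTIONS[name]["type"]
    # bool("false") would be True, so booleans need an explicit string check.
    converted = raw.lower() in ("1", "true", "yes") if target is bool else target(raw)
    return name, converted


assert convert_option("opt_level=3") == ("opt_level", 3)
assert convert_option("verbose=true") == ("verbose", True)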
Example #23
def test_offset_chain():
    def stencil(field_a: gs.Field[float], field_b: gs.Field[float]):
        with computation(PARALLEL), interval(...):
            field_a = field_b[1, 0, 1]
        with computation(PARALLEL), interval(...):
            field_b = field_a[1, 0, 0]
        with computation(PARALLEL), interval(...):
            tmp = field_b[0, -1, 0] + field_b[0, 1, 0]
            field_a = tmp[0, 0, 0] + tmp[0, 0, -1]

    builder = StencilBuilder(stencil, backend=from_name("debug"))
    old_ext = builder.implementation_ir.fields_extents
    legacy_ext = compute_legacy_extents(prepare_gtir(builder))

    for name, ext in old_ext.items():
        assert legacy_ext[name] == ext
Example #24
def test_generate_pre_run(backend_name, mode):
    backend_cls = backend_registry[backend_name]
    builder = StencilBuilder(stencil_def, backend=backend_cls).with_externals(
        {"MODE": mode})
    args_data = make_args_data_from_gtir(builder.gtir_pipeline)

    module_generator = backend_cls.MODULE_GENERATOR_CLASS()
    module_generator.args_data = args_data
    source = module_generator.generate_pre_run()

    if gt_backend.from_name(backend_name).storage_info["device"] == "cpu":
        assert source == ""
    else:
        for key in field_info_val[mode]:
            assert f"{key}.host_to_device()" in source
        for key in unreferenced_val[mode]:
            assert f"{key}.host_to_device()" not in source
Example #25
def test_field_if():
    def stencil(field_a: gs.Field[float], field_b: gs.Field[float]):
        with computation(PARALLEL), interval(...):
            if field_b[0, 1, 0] < 0.1:
                if field_b[1, 0, 0] > 1.0:
                    field_a = 0
                else:
                    field_a = 1
            else:
                tmp = -field_b[0, 1, 0]
                field_a = tmp

    builder = StencilBuilder(stencil, backend=from_name("debug"))
    old_ext = builder.implementation_ir.fields_extents
    legacy_ext = compute_legacy_extents(prepare_gtir(builder))

    for name, ext in old_ext.items():
        assert legacy_ext[name] == ext
Example #26
File: storage.py  Project: havogt/gt4py
    def _is_consistent(self, obj):
        if self.shape != obj.shape:
            return False
        # check strides
        stride = 0
        layout_map = [m for m in self.layout_map if m is not None]
        if len(self.strides) < len(layout_map):
            return False
        for dim in reversed(np.argsort(layout_map)):
            if self.strides[dim] < stride:
                return False
            stride = self.strides[dim]
        # check alignment
        if (self.ctypes.data + np.sum(
                [o * s for o, s in zip(self.default_origin, self.strides)]
        )) % gt_backend.from_name(self.backend).storage_info["alignment"]:
            return False
        return True
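The alignment check above asks whether the address of the default_origin element, i.e. the base pointer plus the dot product of default_origin and the byte strides, is a multiple of the backend's alignment. The same arithmetic on a plain NumPy array, with an illustrative alignment value standing in for the backend-provided one:

import numpy as np

alignment = 64  # illustrative value; the real one comes from storage_info["alignment"]
data = np.zeros((4, 5, 6), dtype=np.float64)
default_origin = (1, 2, 3)

# Byte address of the element at default_origin.
origin_address = data.ctypes.data + sum(
    o * s for o, s in zip(default_origin, data.strides)
)
is_aligned = origin_address % alignment == 0
print(f"origin element at {origin_address:#x}, aligned to {alignment} bytes: {is_aligned}")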
Example #27
def run_test_view(backend):
    default_origin = (1, 1, 1)
    shape = (10, 10, 10)
    stor = gt_store.from_array(
        np.random.randn(*shape), default_origin=default_origin, backend=backend
    )
    stor.view(type(stor))
    if gt_backend.from_name(backend).storage_info["layout_map"]([True] * 3) != (0, 1, 2):
        try:
            np.ones((10, 10, 10)).view(type(stor))
        except RuntimeError:
            pass
        except Exception as e:
            raise e
        else:
            raise Exception
        tmp_view = stor[::2, ::2, ::2]
        assert not tmp_view._is_consistent(stor)
        assert not tmp_view.is_stencil_view
Example #28
def build_iir_stencil(name, options, backend="gtx86", *, id_version="xxxxxx"):
    if isinstance(backend, str):
        backend = gt_backend.from_name(backend)
    if not issubclass(backend, gt_backend.Backend):
        raise TypeError("Backend must be a backend identifier string or a gt4py Backend class.")

    iir_factory = REGISTRY[name]
    iir = iir_factory()
    stencil_id = StencilID("{}.{}".format(options.module, options.name), id_version)

    if options.rebuild:
        # Force recompilation
        stencil_class = None
    else:
        # Use cached version (if id_version matches)
        stencil_class = backend.load(stencil_id, None, options)

    if stencil_class is None:
        stencil_class = backend.build(stencil_id, iir, None, options)

    stencil_implementation = stencil_class()

    return stencil_implementation
Example #29
    def __descriptor__(self) -> "dace.data.Array":
        storage = (dace.StorageType.GPU_Global
                   if hasattr(self, "__cuda_array_interface__")
                   else dace.StorageType.CPU_Heap)
        start_offset = (int(
            np.array([self.default_origin]) @ np.array([self.strides]).T)
            // self.itemsize)
        total_size = int(
            int(np.array([self.shape]) @ np.array([self.strides]).T)
            // self.itemsize)
        start_offset = (
            start_offset
            % gt_backend.from_name(self.backend).storage_info["alignment"])
        descriptor = dace.data.Array(
            shape=self.shape,
            strides=[s // self.itemsize for s in self.strides],
            dtype=dace.typeclass(str(self.dtype)),
            storage=storage,
            total_size=total_size,
            start_offset=start_offset,
        )
        descriptor.default_origin = self.default_origin
        return descriptor
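__descriptor__ above derives start_offset and total_size in elements: a dot product of default_origin (resp. shape) with the byte strides, divided by the item size, with start_offset then reduced modulo the backend alignment. The same computation on a plain NumPy array; the alignment value here is an assumption for illustration only.

import numpy as np

alignment = 32  # illustrative stand-in for storage_info["alignment"]
a = np.zeros((3, 7, 13), dtype=np.float64)
default_origin = (1, 2, 3)

start_offset = int(np.dot(default_origin, a.strides)) // a.itemsize
total_size = int(np.dot(a.shape, a.strides)) // a.itemsize
start_offset %= alignment

print(start_offset, total_size, [s // a.itemsize for s in a.strides])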
Example #30
def run_mixed_backend(backend, id_version, domain):

    validate_field_names = ["out_field"]
    origins = {
        "in_field": (2, 2, 0),
        "out_field": (0, 0, 0),
        "coeff": (0, 0, 0)
    }
    shapes = {
        k: tuple(domain[i] + 2 * origins[k][i] for i in range(3))
        for k in origins.keys()
    }
    name = "horizontal_diffusion"

    arg_fields = get_reference(name, backend, domain, origins, shapes)
    validate_fields = {
        name + "_reference": arg_fields.pop(name + "_reference")
        for name in validate_field_names
    }

    testmodule = generate_test_module("horizontal_diffusion",
                                      gt_backend.from_name("numpy"),
                                      id_version=id_version,
                                      rebuild=False)

    testmodule.run(
        **arg_fields,
        # **{k: v.view(np.ndarray) for k, v in arg_fields.items()},
        _domain_=domain,
        _origin_=origins,
        exec_info=None,
    )

    for k in validate_field_names:
        np.testing.assert_allclose(
            arg_fields[k].view(np.ndarray),
            validate_fields[k + "_reference"].view(np.ndarray))