Example #1
File: misc.py Project: taichi-dev/taichi
def _block_dim_adaptive(block_dim_adaptive):
    """Enable/Disable backends set block_dim adaptively.
    """
    if get_runtime().prog.config.arch != cpu:
        _logging.warn('Adaptive block_dim is supported on CPU backend only')
    else:
        get_runtime().prog.config.cpu_block_dim_adaptive = block_dim_adaptive
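A minimal usage sketch through a public entry point. Assumption (not shown in the snippet): `ti.block_dim_adaptive` is the user-facing wrapper that forwards to `_block_dim_adaptive`.

import taichi as ti

ti.init(arch=ti.cpu)          # adaptive block_dim is CPU-only
ti.block_dim_adaptive(False)  # hypothetical public wrapper; disables adaptive block sizing

# On any other backend the call is a no-op apart from the warning:
#   'Adaptive block_dim is supported on CPU backend only'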
Example #2
    def __call__(self, *args, **kwargs):
        if self.is_grad and impl.current_cfg().opt_level == 0:
            _logging.warn(
                """opt_level = 1 is enforced to enable gradient computation."""
            )
            impl.current_cfg().opt_level = 1
        assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
        key = self.ensure_compiled(*args)
        return self.compiled_functions[key](*args)
Example #3
    def __call__(self, *args, **kwargs):
        args = _process_args(self, args, kwargs)
        if self.is_grad and impl.current_cfg().opt_level == 0:
            _logging.warn(
                """opt_level = 1 is enforced to enable gradient computation."""
            )
            impl.current_cfg().opt_level = 1
        key = self.ensure_compiled(*args)
        return self.compiled_functions[key](*args)
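In Examples #2 and #3, `self.is_grad` is set when the kernel being launched is an autodiff (gradient) kernel, which is what forces `opt_level = 1`. A hedged sketch of how that path is typically reached, assuming the `ti.Tape` reverse-mode API from the same Taichi generation:

import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(dtype=ti.f32, shape=4, needs_grad=True)
loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)

@ti.kernel
def compute_loss():
    for i in x:
        loss[None] += x[i] ** 2

# Exiting the Tape launches the grad kernels; if opt_level is 0 at that
# point, the warning above fires and opt_level is bumped to 1.
with ti.Tape(loss=loss):
    compute_loss()
print(x.grad.to_numpy())  # dloss/dx = 2 * x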
Example #4
File: misc.py Project: taichi-dev/taichi
def adaptive_arch_select(arch, enable_fallback, use_gles):
    if arch is None:
        return cpu
    if not isinstance(arch, (list, tuple)):
        arch = [arch]
    for a in arch:
        if is_arch_supported(a, use_gles):
            return a
    if not enable_fallback:
        raise RuntimeError(f'Arch={arch} is not supported')
    _logging.warn(f'Arch={arch} is not supported, falling back to CPU')
    return cpu
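Because `ti.init` routes its `arch` argument through this helper (see Example #9), the fallback behavior can be exercised directly from user code. A sketch; which branch runs depends on the hardware:

import taichi as ti

# Try CUDA first, then Vulkan; if neither is supported on this machine,
# the helper logs 'Arch=[...] is not supported, falling back to CPU'.
ti.init(arch=[ti.cuda, ti.vulkan])

# With fallback disabled, an unsupported arch raises instead:
#   ti.init(arch=ti.cuda, enable_fallback=False)  # RuntimeError without CUDA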
Example #5
File: misc.py Project: taichi-dev/taichi
def block_local(*args):
    """Hints Taichi to cache the fields and to enable the BLS optimization.

    Please visit https://docs.taichi-lang.org/docs/performance
    for how BLS is used.

    Args:
        *args (List[Field]): A list of sparse Taichi fields.
    """
    if impl.current_cfg().opt_level == 0:
        _logging.warn("""opt_level = 1 is enforced to enable bls analysis.""")
        impl.current_cfg().opt_level = 1
    for a in args:
        for v in a._get_field_members():
            get_runtime().prog.current_ast_builder().insert_snode_access_flag(
                _ti_core.SNodeAccessFlag.block_local, v.ptr)
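A usage sketch of `ti.block_local` inside a kernel, following the pattern from the linked performance docs (a blocked layout plus a stencil loop; the field names and sizes are illustrative):

import taichi as ti

ti.init(arch=ti.gpu)

a = ti.field(dtype=ti.f32)
b = ti.field(dtype=ti.f32)
# Blocked layout: 128 pointer blocks, each holding a dense run of 16 cells.
ti.root.pointer(ti.i, 128).dense(ti.i, 16).place(a, b)

@ti.kernel
def stencil():
    ti.block_local(a)  # hint: stage `a` in block-local storage for this loop
    for i in a:
        b[i] = a[i - 1] + a[i + 1]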
Example #6
File: impl.py Project: Leonz5288/taichi
def _clamp_unsigned_to_range(npty, val):
    # npty: np.int32 or np.int64
    iif = np.iinfo(npty)
    if iif.min <= val <= iif.max:
        return val
    cap = (1 << iif.bits)
    if not 0 <= val < cap:
        # We let pybind11 fail intentionally, because this isn't the case we want
        # to deal with: |val| doesn't fall into the valid range of either
        # the signed or the unsigned type.
        return val
    new_val = val - cap
    warn(
        f'Constant {val} has exceeded the range of {iif.bits}-bit int, clamped to {new_val}'
    )
    return new_val
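A worked instance of the clamping arithmetic above, using only NumPy. For np.int32, `iinfo.bits` is 32, so `cap = 1 << 32`; the value 2**32 - 1 is valid as a uint32 but overflows int32, and the clamp reinterprets it as -1:

import numpy as np

iif = np.iinfo(np.int32)   # min = -2**31, max = 2**31 - 1, bits = 32
val = 2**32 - 1            # 4294967295: valid uint32, out of range for int32
assert not iif.min <= val <= iif.max
cap = 1 << iif.bits        # 2**32
assert 0 <= val < cap
print(val - cap)           # -1, the two's-complement reinterpretation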
Example #7
    def __call__(self, *args, **kwargs):
        args = _process_args(self, args, kwargs)

        # Transform the primal kernel to forward mode grad kernel
        # then recover to primal when exiting the forward mode manager
        if self.runtime.fwd_mode_manager:
            # TODO: if we would like to compute 2nd-order derivatives by forward-on-reverse in a nested context manager fashion,
            # i.e., a `Tape` nested in the `FwdMode`, we can transform the kernels with `mode_original == AutodiffMode.REVERSE` only,
            # to avoid duplicate computation for 1st-order derivatives
            mode_original = self.autodiff_mode
            self.autodiff_mode = AutodiffMode.FORWARD
            self.runtime.fwd_mode_manager.insert(self, mode_original)

        if self.autodiff_mode != AutodiffMode.NONE and impl.current_cfg(
        ).opt_level == 0:
            _logging.warn(
                """opt_level = 1 is enforced to enable gradient computation."""
            )
            impl.current_cfg().opt_level = 1
        key = self.ensure_compiled(*args)
        return self.runtime.compiled_functions[key](*args)
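A hedged sketch of how the forward-mode branch above is entered. Assumptions: the `ti.ad.FwdMode` context manager and `needs_dual` fields from the same Taichi generation; exact parameter names may differ across versions:

import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(dtype=ti.f32, shape=(), needs_dual=True)
loss = ti.field(dtype=ti.f32, shape=(), needs_dual=True)

@ti.kernel
def compute_loss():
    loss[None] = x[None] ** 2

x[None] = 3.0
# Inside FwdMode, `runtime.fwd_mode_manager` is set, so the kernel above is
# transformed to a forward-mode grad kernel (AutodiffMode.FORWARD).
with ti.ad.FwdMode(loss=loss, param=x):
    compute_loss()
print(loss.dual[None])  # d(loss)/dx at x=3 -> 6.0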
Example #8
import functools
import os
import sys

from taichi._logging import info, warn
from taichi.core.util import ti_core as _ti_core

try:
    import sourceinspect as oinspect  # pylint: disable=unused-import
except ImportError:
    warn('`sourceinspect` not installed!')
    warn(
        'Without this package Taichi may not function well in Python IDLE interactive shell, '
        'Blender scripting module and Python native shell.')
    warn('Please run `python3 -m pip install sourceinspect` to install.')
    import inspect as oinspect  # pylint: disable=unused-import

pybuf_enabled = False
_env_enable_pybuf = os.environ.get('TI_ENABLE_PYBUF', '1')
if not _env_enable_pybuf or int(_env_enable_pybuf):
    # In Jupyter / IDLE, sys.stdout is replaced with a wrapped stream, while
    # sys.__stdout__ should always be the raw console stdout.
    pybuf_enabled = sys.stdout is not sys.__stdout__

_ti_core.toggle_python_print_buffer(pybuf_enabled)


def _shell_pop_print(old_call):
    if not pybuf_enabled:
        # zero-overhead!
        return old_call
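The whole toggle rests on one check: interactive shells such as Jupyter and IDLE replace `sys.stdout` with a wrapper while `sys.__stdout__` stays bound to the raw console stream. A standalone sketch of the same detection:

import os
import sys

env = os.environ.get('TI_ENABLE_PYBUF', '1')
if not env or int(env):
    # False in a plain terminal; True under Jupyter / IDLE, where stdout
    # has been swapped for a wrapped stream.
    print(sys.stdout is not sys.__stdout__)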
Example #9
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         enable_fallback=True,
         **kwargs):
    """Initializes the Taichi runtime.

    This should always be the entry point of your Taichi program. Most
    importantly, it sets the backend used throughout the program.

    Args:
        arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or :const:`~taichi.lang.gpu`.
        default_fp (Optional[type]): Default floating-point type.
        default_ip (Optional[type]): Default integral type.
        **kwargs: Taichi provides highly customizable compilation through
            ``kwargs``, which allows for fine-grained control of the Taichi compiler
            behavior. Below we list some of the most frequently used ones. For a
            complete list, please check out
            https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h.

            * ``cpu_max_num_threads`` (int): Sets the number of threads used by the CPU thread pool.
            * ``debug`` (bool): Enables the debug mode, under which Taichi does a few more things like boundary checks.
            * ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels.
            * ``packed`` (bool): Enables the packed memory layout. See https://docs.taichi.graphics/lang/articles/advanced/layout.
    """
    # Check version for users every 7 days if not disabled by users.
    skip = os.environ.get("TI_SKIP_VERSION_CHECK")
    if skip != 'ON':
        # We don't join this thread because we do not wish to block users.
        check_version_thread = threading.Thread(target=try_check_version,
                                                daemon=True)
        check_version_thread.start()

    # Make a deepcopy in case these args reference items in ti.cfg, which are
    # actually references. If no copy were made and the args were indeed
    # references, ti.reset() could reset them to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move these stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            _ti_core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = f32
        elif env_default_fp == '64':
            default_fp = f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            _ti_core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = i32
        elif env_default_ip == '64':
            default_ip = i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        impl.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        impl.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('experimental_real_function')
    env_spec.add('short_circuit_operators')
    env_spec.add('ndarray_use_torch')

    # compiler configurations (ti.cfg):
    for key in dir(cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        _cast = type(getattr(cfg, key))
        if _cast is bool:
            _cast = None
        env_comp.add(key, _cast)

    unexpected_keys = kwargs.keys()

    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        set_gdb_trigger(spec_cfg.gdb_trigger)
        impl.get_runtime().experimental_real_function = \
            spec_cfg.experimental_real_function
        impl.get_runtime().short_circuit_operators = \
            spec_cfg.short_circuit_operators
        impl.get_runtime().ndarray_use_torch = \
            spec_cfg.ndarray_use_torch
        _logging.set_logging_level(spec_cfg.log_level.lower())

    # select arch (backend):
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        _logging.info(f'Following TI_ARCH setting: arch={env_arch}')
        arch = _ti_core.arch_from_name(env_arch)
    cfg.arch = adaptive_arch_select(arch, enable_fallback, cfg.use_gles)
    if cfg.arch == cc:
        _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
    print(f'[Taichi] Starting on arch={_ti_core.arch_name(cfg.arch)}')

    # Torch-based ndarrays allocate their memory on the host rather than on the
    # OpenGL backend, so they don't work with the opengl arch.
    if cfg.arch == opengl and spec_cfg.ndarray_use_torch:
        _logging.warn(
            'Opengl backend doesn\'t support torch based ndarray. Setting ndarray_use_torch to False.'
        )
        impl.get_runtime().ndarray_use_torch = False

    if _test_mode:
        return spec_cfg

    get_default_kernel_profiler().set_kernel_profiler_mode(cfg.kernel_profiler)

    # create a new program:
    impl.get_runtime().create_program()

    _logging.trace('Materializing runtime...')
    impl.get_runtime().prog.materialize_runtime()

    impl._root_fb = FieldsBuilder()

    if not os.environ.get("TI_DISABLE_SIGNAL_HANDLERS", False):
        impl.get_runtime()._register_signal_handlers()

    return None
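Typical invocations exercising the paths above, using only kwargs named in the docstring; the commented lines show the unrecognized-kwarg branch and the environment overrides:

import taichi as ti

ti.init(arch=ti.cpu, default_fp=ti.f32, debug=True, cpu_max_num_threads=4)

# Unrecognized kwargs reach the KeyError branch:
#   ti.init(arch=ti.cpu, no_such_option=True)  # KeyError

# Environment variables override arguments:
#   TI_ARCH=cuda TI_DEFAULT_FP=64 python my_script.py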