def send_crash_report(message, receiver=None):
    """Email a one-shot crash report.

    Args:
        message (str): Body text of the report.
        receiver (Optional[str]): Destination address; falls back to the
            ``TI_MONITOR_EMAIL`` environment variable when omitted.

    Reads/writes the module-level ``emailed`` flag so at most one report is
    sent per process, and uses the module-level ``gmail_sender`` /
    ``gmail_passwd`` credentials for SMTP authentication.
    """
    global emailed
    # Only ever send a single report per process.
    if emailed:
        return
    emailed = True

    if receiver is None:
        receiver = os.environ.get('TI_MONITOR_EMAIL', None)
        if receiver is None:
            tc.warn('No receiver in $TI_MONITOR_EMAIL')
            return

    tc.warn('Emailing {}'.format(receiver))
    TO = receiver
    SUBJECT = 'Report'
    TEXT = message

    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()
        server.login(gmail_sender, gmail_passwd)
        BODY = '\r\n'.join([
            'To: %s' % TO,
            'From: %s' % gmail_sender,
            'Subject: %s' % SUBJECT,
            '',
            TEXT,
        ])
        try:
            server.sendmail(gmail_sender, [TO], BODY)
        except Exception:  # was a bare except; keep best-effort, but narrow it
            print('Error sending mail')
    finally:
        # Always close the connection — previously a failure in ehlo/starttls/
        # login skipped server.quit() and leaked the connection.
        server.quit()
    print('Press enter or Ctrl + \ to exit.')
def email_call_back(_):
    """Crash callback: record the crash, send a report email, and terminate.

    Args:
        _: Ignored callback payload.
    """
    global crashed
    # Mark the module-level crash flag before doing anything else.
    crashed = True
    tc.warn('Task has crashed.')
    message = f'Your task [{task_name}] at machine [{socket.gethostname()}] has crashed.'
    send_crash_report(message)
    # Skip the normal at-exit hook: we are exiting abnormally on purpose.
    atexit.unregister(at_exit)
    exit(-1)
def adaptive_arch_select(arch):
    """Resolve *arch* to a usable backend, falling back to CPU.

    Args:
        arch: None, a single arch, or a list/tuple of archs in preference
            order.

    Returns:
        The first supported arch, or ``cpu`` when none is supported.
    """
    if arch is None:
        return cpu
    if not isinstance(arch, (list, tuple)):
        arch = [arch]
    # Sentinel distinguishes "nothing supported" from any real arch value.
    _missing = object()
    chosen = next((candidate for candidate in arch
                   if is_arch_supported(candidate)), _missing)
    if chosen is not _missing:
        return chosen
    ti.warn(f'Arch={arch} is not supported, falling back to CPU')
    return cpu
def __call__(self, *args, **kwargs):
    """Launch the kernel with *args*, compiling it on first use.

    Args:
        *args: Positional arguments forwarded to the compiled kernel.
        **kwargs: Not supported; must be empty.

    Returns:
        Whatever the compiled kernel function returns.
    """
    # Gradient kernels need optimization passes; force opt_level to 1 when
    # the user left it at 0.
    if self.is_grad and impl.current_cfg().opt_level == 0:
        ti.warn(
            """opt_level = 1 is enforced to enable gradient computation."""
        )
        impl.current_cfg().opt_level = 1
    # NOTE(review): this local looks like a sentinel consumed by Taichi's
    # traceback-filtering machinery to hide this frame — confirm before
    # removing it.
    _taichi_skip_traceback = 1
    assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
    # ensure_compiled compiles on first use and returns the cache key for
    # this argument signature.
    key = self.ensure_compiled(*args)
    return self.compiled_functions[key](*args)
def warning(msg, type=UserWarning, stacklevel=1):
    """Print a warning message.

    Args:
        msg (str): Warning text.
        type (Warning): Warning category; defaults to ``UserWarning``.
            (Shadows the builtin ``type``, kept for interface compatibility.)
        stacklevel (int): Frames to skip when reporting the warning site;
            bumped by one internally to account for this wrapper.
    """
    import warnings

    # Development toggle: route warnings through taichi's own logger instead
    # of the stdlib warnings machinery.
    use_spdlog = False
    if use_spdlog:
        # These imports are only needed on this (currently disabled) path;
        # previously they ran unconditionally on every call.
        import traceback
        import taichi as ti
        s = traceback.extract_stack()[:-stacklevel]
        raw = ''.join(traceback.format_list(s))
        ti.warn(f'{type.__name__}: {msg}')
        ti.warn(f'\n{raw}')
    else:
        # +1 so the reported location is the caller of warning(), not here.
        warnings.warn(msg, type, stacklevel=stacklevel + 1)
def adaptive_arch_select(arch, enable_fallback, use_gles):
    """Resolve *arch* to a usable backend.

    Args:
        arch: None, a single arch, or a list/tuple of archs in preference
            order.
        enable_fallback (bool): When False, raise instead of falling back.
        use_gles (bool): Forwarded to the per-arch support check.

    Returns:
        The first supported arch, or ``cpu`` as the fallback.

    Raises:
        RuntimeError: If nothing is supported and fallback is disabled.
    """
    if arch is None:
        return cpu
    if not isinstance(arch, (list, tuple)):
        arch = [arch]
    # Sentinel distinguishes "nothing supported" from any real arch value.
    _missing = object()
    chosen = next((candidate for candidate in arch
                   if is_arch_supported(candidate, use_gles)), _missing)
    if chosen is not _missing:
        return chosen
    if not enable_fallback:
        raise RuntimeError(f'Arch={arch} is not supported')
    ti.warn(f'Arch={arch} is not supported, falling back to CPU')
    return cpu
def adaptive_arch_select(arch):
    """Resolve *arch* to a usable backend, falling back to CPU.

    Args:
        arch: None, a single arch, or a list/tuple of archs in preference
            order.

    Returns:
        The first supported arch, or ``cpu`` when none is supported.
    """
    if arch is None:
        return cpu
    import taichi as ti
    supported = supported_archs()
    # Accept tuples as well as lists, matching the other variants of this
    # helper; previously a tuple fell through to the scalar membership test
    # below and always triggered the CPU fallback.
    if isinstance(arch, (list, tuple)):
        for a in arch:
            if is_arch_supported(a):
                return a
    elif arch in supported:
        return arch
    ti.warn(f'Arch={arch} is not supported, falling back to CPU')
    return cpu
def _clamp_unsigned_to_range(npty, val): # npty: np.int32 or np.int64 iif = np.iinfo(npty) if iif.min <= val <= iif.max: return val cap = (1 << iif.bits) if not 0 <= val < cap: # We let pybind11 fail intentionally, because this isn't the case we want # to deal with: |val| does't fall into the valid range of either # the signed or the unsigned type. return val new_val = val - cap ti.warn( f'Constant {val} has exceeded the range of {iif.bits} int, clamped to {new_val}' ) return new_val
def block_local(*args):
    """Hints Taichi to cache the fields and to enable the BLS optimization.

    Please visit https://docs.taichi.graphics/lang/articles/advanced/performance
    for how BLS is used.

    Args:
        *args (List[Field]): A list of sparse Taichi fields.
    """
    # BLS analysis requires the optimizer; bump opt_level if it is off.
    if impl.current_cfg().opt_level == 0:
        ti.warn("""opt_level = 1 is enforced to enable bls analysis.""")
        impl.current_cfg().opt_level = 1
    # Flag every member of every field as block-local in the core runtime.
    for field in args:
        for member in field.get_field_members():
            _ti_core.insert_snode_access_flag(
                _ti_core.SNodeAccessFlag.block_local, member.ptr)
def _test_cpp():
    """Run the taichi_cpp_tests binary from the build directory, if present."""
    ti.reset()
    print("Running C++ tests...")
    ti_lib_dir = os.path.join(ti.__path__[0], 'lib')
    cpp_test_filename = 'taichi_cpp_tests'
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(curr_dir, '../build')
    if os.path.exists(os.path.join(build_dir, cpp_test_filename)):
        # NOTE(review): env= replaces the child's entire environment with just
        # TI_LIB_DIR — confirm the test binary needs nothing else (PATH etc.).
        subprocess.check_call(f'./{cpp_test_filename}',
                              env={'TI_LIB_DIR': ti_lib_dir},
                              cwd=build_dir)
    else:
        # ti.warn takes a single message; the hint strings were previously
        # passed as extra positional arguments and never reached the log.
        ti.warn(
            f"C++ tests are skipped due to missing {cpp_test_filename} in {build_dir} "
            "Try building taichi with `TAICHI_CMAKE_ARGS=\'-DTI_BUILD_TESTS:BOOL=ON\' python setup.py develop` "
            "if you want to enable it.")
def init(default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    """Initialize the Taichi runtime (legacy variant).

    Args:
        default_fp: Default floating-point type; set on the runtime if given.
        default_ip: Default integral type; set on the runtime if given.
        print_preprocessed: If given, toggles printing of preprocessed kernels.
        debug: Debug mode; defaults to the TI_DEBUG environment variable.
        **kwargs: Additional attributes assigned onto ``ti.cfg`` verbatim.
    """
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    def _env_flag(name, default):
        # A variable that is unset or set-but-empty falls back to the
        # default; previously an empty value crashed in int('').
        raw = os.environ.get(name, '')
        return bool(int(raw)) if raw.strip() else default

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed
    if debug is None:
        debug = _env_flag('TI_DEBUG', False)
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    use_unified_memory = _env_flag('TI_USE_UNIFIED_MEMORY', True)
    ti.cfg.use_unified_memory = use_unified_memory
    if not use_unified_memory:
        ti.warn(
            'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
        )
    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)
    ti.get_runtime().create_program()
import sys, os, atexit, functools

# Prefer sourceinspect (works in IDLE / Blender / native shells); fall back
# to stdlib inspect with a warning when it is not installed.
try:
    import sourceinspect as oinspect
except ImportError:
    import taichi as ti
    ti.warn('`sourceinspect` not installed!')
    ti.warn(
        'Without this package Taichi may not function well in Python IDLE interactive shell, '
        'Blender scripting module and Python native shell.')
    ti.warn('Please run `python3 -m pip install sourceinspect` to install.')
    import inspect as oinspect

# Decide whether Taichi's C++ side should buffer Python prints.
# TI_ENABLE_PYBUF: unset/empty or truthy int -> auto-detect; '0' -> disabled.
pybuf_enabled = False
_env_enable_pybuf = os.environ.get('TI_ENABLE_PYBUF', '1')
if not _env_enable_pybuf or int(_env_enable_pybuf):
    # When using in Jupyter / IDLE, the sys.stdout will be their wrapped ones.
    # While sys.__stdout__ should always be the raw console stdout.
    pybuf_enabled = sys.stdout is not sys.__stdout__

from .core import taichi_lang_core

taichi_lang_core.toggle_python_print_buffer(pybuf_enabled)


def _shell_pop_print(old_call):
    # Decorator factory: wraps old_call only when print buffering is active.
    # NOTE(review): the wrapped-function body appears truncated in this view.
    if not pybuf_enabled:
        # zero-overhead!
        return old_call

    import taichi as ti
    ti.info('Graphical python shell detected, using wrapped sys.stdout')
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         enable_fallback=True,
         **kwargs):
    """Initializes the Taichi runtime.

    This should always be the entry point of your Taichi program. Most
    importantly, it sets the backend used throughout the program.

    Args:
        arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or
            :const:`~taichi.lang.gpu`.
        default_fp (Optional[type]): Default floating-point type.
        default_ip (Optional[type]): Default integral type.
        **kwargs: Taichi provides highly customizable compilation through
            ``kwargs``, which allows for fine grained control of Taichi
            compiler behavior. Below we list some of the most frequently
            used ones. For a complete list, please check out
            https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h.

            * ``cpu_max_num_threads`` (int): Sets the number of threads used
              by the CPU thread pool.
            * ``debug`` (bool): Enables the debug mode, under which Taichi
              does a few more things like boundary checks.
            * ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels.
            * ``packed`` (bool): Enables the packed memory layout. See
              https://docs.taichi.graphics/lang/articles/advanced/layout.

    Returns:
        The special-config object when ``_test_mode`` is set, else ``None``.
    """
    # Check version for users every 7 days if not disabled by users.
    skip = os.environ.get("TI_SKIP_VERSION_CHECK")
    if skip != 'ON':
        try_check_version()

    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    ti.reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move these stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            # Environment variable wins over the explicit argument.
            _ti_core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = ti.f32
        elif env_default_fp == '64':
            default_fp = ti.f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            _ti_core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = ti.i32
        elif env_default_ip == '64':
            default_ip = ti.i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        impl.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        impl.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('print_preprocessed')
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('excepthook')
    env_spec.add('experimental_real_function')
    env_spec.add('short_circuit_operators')

    # compiler configurations (ti.cfg): every ti.cfg attribute except the
    # three handled above may be overridden via env vars / kwargs.
    for key in dir(ti.cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        _cast = type(getattr(ti.cfg, key))
        if _cast is bool:
            _cast = None
        env_comp.add(key, _cast)

    # Any kwargs the configurators did not consume are unknown options.
    unexpected_keys = kwargs.keys()
    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        ti.set_gdb_trigger(spec_cfg.gdb_trigger)
        impl.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
        impl.get_runtime().experimental_real_function = \
            spec_cfg.experimental_real_function
        impl.get_runtime().short_circuit_operators = \
            spec_cfg.short_circuit_operators
        ti.set_logging_level(spec_cfg.log_level.lower())
        if spec_cfg.excepthook:
            # TODO(#1405): add a way to restore old excepthook
            ti.enable_excepthook()

    # select arch (backend): TI_ARCH overrides the arch argument.
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        ti.info(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = _ti_core.arch_from_name(env_arch)
    ti.cfg.arch = adaptive_arch_select(arch, enable_fallback, ti.cfg.use_gles)
    if ti.cfg.arch == cc:
        _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
    print(f'[Taichi] Starting on arch={_ti_core.arch_name(ti.cfg.arch)}')

    # Torch based ndarray on opengl backend allocates memory on host instead of opengl backend.
    # So it won't work.
    if ti.cfg.arch == opengl and ti.cfg.ndarray_use_torch:
        ti.warn(
            'Opengl backend doesn\'t support torch based ndarray. Setting ndarray_use_torch to False.'
        )
        ti.cfg.ndarray_use_torch = False

    if _test_mode:
        return spec_cfg

    get_default_kernel_profiler().set_kernel_profiler_mode(
        ti.cfg.kernel_profiler)

    # create a new program:
    impl.get_runtime().create_program()

    ti.trace('Materializing runtime...')
    impl.get_runtime().prog.materialize_runtime()

    impl._root_fb = FieldsBuilder()

    if not os.environ.get("TI_DISABLE_SIGNAL_HANDLERS", False):
        impl.get_runtime()._register_signal_handlers()

    return None
import sys, os, atexit, functools

# Prefer sourceinspect (works in IDLE / Blender / native shells); fall back
# to stdlib inspect with a warning when it is not installed.
try:
    import sourceinspect as oinspect
except ImportError:
    import taichi as ti
    ti.warn('`sourceinspect` not installed!')
    ti.warn('This may cause Taichi not functional in IDLE interactive shell, '
            'Blender scripting module and Python native shell.')
    ti.warn('Please run `python3 -m pip install sourceinspect` to install.')
    import inspect as oinspect

# Decide whether Taichi's C++ side should buffer Python prints.
# TI_ENABLE_PYBUF: unset/empty or truthy int -> auto-detect; '0' -> disabled.
pybuf_enabled = False
_env_enable_pybuf = os.environ.get('TI_ENABLE_PYBUF', '1')
if not _env_enable_pybuf or int(_env_enable_pybuf):
    # When using in Jupyter / IDLE, the sys.stdout will be their wrapped ones.
    pybuf_enabled = type(sys.stdout).__name__ != 'TextIOWrapper'

from .core import taichi_lang_core

taichi_lang_core.toggle_python_print_buffer(pybuf_enabled)


def _shell_pop_print(old_call):
    # Decorator factory: wraps old_call only when print buffering is active.
    # NOTE(review): the body after the decorator is truncated in this view.
    if not pybuf_enabled:
        # zero-overhead!
        return old_call

    import taichi as ti
    ti.info('Graphical python shell detected, using wrapped sys.stdout')

    @functools.wraps(old_call)
def init(default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    """Initialize the Taichi runtime (legacy variant with env-var overrides).

    Args:
        default_fp: Default floating-point type; TI_DEFAULT_FP is consulted
            when this is None.
        default_ip: Default integral type; TI_DEFAULT_IP is consulted when
            this is None.
        print_preprocessed: If given, toggles printing of preprocessed
            kernels.
        debug: Debug mode; defaults to the TI_DEBUG environment variable.
        **kwargs: Additional attributes assigned onto ``ti.cfg`` verbatim.

    Raises:
        ValueError: If TI_DEFAULT_FP / TI_DEFAULT_IP is set to anything
            other than 32 or 64.
    """
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    if default_fp is None:  # won't override
        dfl_fp = os.environ.get("TI_DEFAULT_FP")
        # Environment values are strings; the previous comparisons against
        # the ints 32/64 never matched, so every valid TI_DEFAULT_FP setting
        # wrongly fell through to the ValueError branch.
        if dfl_fp == '32':
            default_fp = core.DataType.f32
        elif dfl_fp == '64':
            default_fp = core.DataType.f64
        elif dfl_fp is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_FP: {dfl_fp}, should be 32 or 64')
    if default_ip is None:
        dfl_ip = os.environ.get("TI_DEFAULT_IP")
        # Same string-vs-int fix as for TI_DEFAULT_FP above.
        if dfl_ip == '32':
            default_ip = core.DataType.i32
        elif dfl_ip == '64':
            default_ip = core.DataType.i64
        elif dfl_ip is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_IP: {dfl_ip}, should be 32 or 64')

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed
    if debug is None:
        debug = bool(int(os.environ.get('TI_DEBUG', '0')))
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    use_unified_memory = bool(
        int(os.environ.get('TI_USE_UNIFIED_MEMORY', '1')))
    ti.cfg.use_unified_memory = use_unified_memory
    if not use_unified_memory:
        ti.warn(
            'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
        )

    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)

    def boolean_config(key, name=None):
        # Environment variables override ti.cfg booleans when present.
        if name is None:
            name = 'TI_' + key.upper()
        value = os.environ.get(name)
        if value is not None:
            setattr(ti.cfg, key,
                    len(value) and bool(int(value)))  # does override

    boolean_config("print_ir")
    boolean_config("verbose")
    boolean_config("fast_math")
    arch = os.environ.get("TI_ARCH")
    if arch is not None:
        ti.cfg.arch = ti.core.arch_from_name(arch)
    log_level = os.environ.get("TI_LOG_LEVEL")
    if log_level is not None:
        ti.set_logging_level(log_level.lower())
    ti.get_runtime().create_program()