Example #1
    def materialize(self):
        if self.materialized:
            return

        print('[Taichi] materializing...')
        self.create_program()

        ti.trace('Materializing runtime...')
        self.prog.materialize_runtime()
        root.finalize()

        self.materialized = True
        not_placed = []
        for var in self.global_vars:
            if var.ptr.snode() is None:
                tb = getattr(var, 'declaration_tb', str(var.ptr))
                not_placed.append(tb)

        if len(not_placed):
            bar = '=' * 44 + '\n'
            raise RuntimeError(
                f'These field(s) are not placed:\n{bar}' +
                f'{bar}'.join(not_placed) +
                f'{bar}Please consider specifying a shape for them. E.g.,' +
                '\n\n  x = ti.field(float, shape=(2, 3))')

        for callback in self.materialize_callbacks:
            callback()
        self.materialize_callbacks = []
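The RuntimeError above already names the fix. As a minimal illustration (a sketch against the public Taichi API; `ti.root.dense(...).place(...)` is the manual-layout alternative for fields declared without a shape):

    import taichi as ti

    ti.init(arch=ti.cpu)

    # A field declared with a shape is placed automatically:
    x = ti.field(float, shape=(2, 3))

    # A shapeless field must be placed by hand before materialization,
    # otherwise materialize() raises the RuntimeError shown above:
    y = ti.field(float)
    ti.root.dense(ti.i, 8).place(y)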
Example #2
    def materialize(self):
        if self.materialized:
            return

        print('[Taichi] materializing...')
        self.create_program()

        def layout():
            for func in self.layout_functions:
                func()

        import taichi as ti
        ti.trace('Materializing layout...')
        taichi_lang_core.layout(layout)
        self.materialized = True
        not_placed = []
        for var in self.global_vars:
            if var.ptr.snode() is None:
                tb = getattr(var, 'declaration_tb', str(var.ptr))
                not_placed.append(tb)

        if len(not_placed):
            bar = '=' * 44 + '\n'
            raise RuntimeError(
                f'These field(s) are not placed:\n{bar}' +
                f'{bar}'.join(not_placed) +
                f'{bar}Please consider specifying a shape for them. E.g.,' +
                '\n\n  x = ti.field(float, shape=(2, 3))')
Example #3
    def run(self):
        # self.output(0)
        objectives = []
        blklog = open("{}/blocks.log".format(self.working_directory), "w")

        for i in range(self.i_start, self.max_iterations):
            blklog.write(self.get_block_counts() + '\n')
            blklog.flush()

            #if i % 5 == 1 and i > 20:
            #  tc.info("Computing objective")
            #  self.general_action(action='objective')
            obj = float(self.iterate(i))
            objectives.append(obj)
            if i > 10 and len(objectives) >= 4:
                r = abs((objectives[-1] + objectives[-2] - objectives[-3] -
                         objectives[-4]) / (objectives[-1] + objectives[-2]))
                tc.trace("r = {:4.2f}%", r * 100)
                if r < 5e-3:
                    tc.trace(
                        "*************** Should stop now,  Final objective: {}",
                        objectives[-1])
            #print("debug, exiting")
            #break

        blklog.close()
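The stopping test in the loop above measures the relative change between the sum of the last two objectives and the sum of the two before them. A self-contained sketch of the same criterion (the function name and tolerance default are illustrative, not from the source):

    def should_stop(objectives, tol=5e-3):
        # Need at least four samples to compare two consecutive pairs.
        if len(objectives) < 4:
            return False
        recent = objectives[-1] + objectives[-2]
        earlier = objectives[-3] + objectives[-4]
        # Relative change of the pair sums; a small r means the optimization plateaued.
        r = abs((recent - earlier) / recent)
        return r < tol

    # e.g. should_stop([12.0, 9.5, 8.01, 8.0, 8.002, 8.001])  ->  True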
Example #4
    def materialize(self, key=None, args=None, arg_features=None):
        _taichi_skip_traceback = 1
        if key is None:
            key = (self.func, 0)
        self.runtime.materialize()
        if key in self.compiled_functions:
            return
        grad_suffix = ""
        if self.is_grad:
            grad_suffix = "_grad"
        kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}"
        ti.trace(f"Compiling kernel {kernel_name}...")

        tree, ctx = _get_tree_and_ctx(
            self,
            args=args,
            excluded_parameters=self.template_slot_locations,
            arg_features=arg_features)

        if self.is_grad:
            KernelSimplicityASTChecker(self.func).visit(tree)

        # Do not change the name of 'taichi_ast_generator'
        # The warning system needs this identifier to remove unnecessary messages
        def taichi_ast_generator():
            _taichi_skip_traceback = 1
            if self.runtime.inside_kernel:
                raise TaichiSyntaxError(
                    "Kernels cannot call other kernels. I.e., nested kernels are not allowed. "
                    "Please check if you have direct/indirect invocation of kernels within kernels. "
                    "Note that some methods provided by the Taichi standard library may invoke kernels, "
                    "and please move their invocations to Python-scope.")
            self.runtime.inside_kernel = True
            self.runtime.current_kernel = self
            try:
                transform_tree(tree, ctx)
                if not impl.get_runtime().experimental_real_function:
                    if self.return_type and not ctx.returned:
                        raise TaichiSyntaxError(
                            "Kernel has a return type but does not have a return statement"
                        )
            finally:
                self.runtime.inside_kernel = False
                self.runtime.current_kernel = None

        taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,
                                               kernel_name, self.is_grad)

        self.kernel_cpp = taichi_kernel

        assert key not in self.compiled_functions
        self.compiled_functions[key] = self.get_function_body(taichi_kernel)
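The taichi_ast_generator above wraps kernel construction in a reentrancy guard: the inside_kernel flag rejects nested kernel calls, and the try/finally restores state even when transformation fails. A stripped-down sketch of that pattern (class and function names are illustrative):

    class Runtime:
        def __init__(self):
            self.inside_kernel = False

    runtime = Runtime()

    def run_guarded(body):
        if runtime.inside_kernel:
            raise RuntimeError("Kernels cannot call other kernels.")
        runtime.inside_kernel = True
        try:
            body()                         # may itself attempt run_guarded(...)
        finally:
            runtime.inside_kernel = False  # restored even if body() raises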
Example #5
    def materialize_ast_refactor(self, key=None, args=None, arg_features=None):
        _taichi_skip_traceback = 1
        if key is None:
            key = (self.func, 0)
        self.runtime.materialize()
        if key in self.compiled_functions:
            return
        grad_suffix = ""
        if self.is_grad:
            grad_suffix = "_grad"
        kernel_name = "{}_c{}_{}{}".format(self.func.__name__,
                                           self.kernel_counter, key[1],
                                           grad_suffix)
        ti.trace("Compiling kernel {}...".format(kernel_name))

        tree, global_vars = _get_tree_and_global_vars(self, args)

        if self.is_grad:
            KernelSimplicityASTChecker(self.func).visit(tree)
        visitor = ASTTransformerTotal(
            excluded_parameters=self.template_slot_locations,
            func=self,
            arg_features=arg_features,
            globals=global_vars)

        ast.increment_lineno(tree, oinspect.getsourcelines(self.func)[1] - 1)

        # Do not change the name of 'taichi_ast_generator'
        # The warning system needs this identifier to remove unnecessary messages
        def taichi_ast_generator():
            _taichi_skip_traceback = 1
            if self.runtime.inside_kernel:
                raise TaichiSyntaxError(
                    "Kernels cannot call other kernels. I.e., nested kernels are not allowed. Please check if you have direct/indirect invocation of kernels within kernels. Note that some methods provided by the Taichi standard library may invoke kernels, and please move their invocations to Python-scope."
                )
            self.runtime.inside_kernel = True
            self.runtime.current_kernel = self
            try:
                visitor.visit(tree)
            finally:
                self.runtime.inside_kernel = False
                self.runtime.current_kernel = None

        taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,
                                               kernel_name, self.is_grad)

        self.kernel_cpp = taichi_kernel

        assert key not in self.compiled_functions
        self.compiled_functions[key] = self.get_function_body(taichi_kernel)
Example #6
    def materialize(self):
        if self.materialized:
            return
        self.create_program()
        Expr.layout_materialized = True

        def layout():
            for func in self.layout_functions:
                func()

        import taichi as ti
        ti.trace("Materializing layout...".format())
        taichi_lang_core.layout(layout)
        self.materialized = True
        for var in self.global_vars:
            assert var.ptr.snode() is not None, 'Some variable(s) not placed'
Example #7
    def materialize(self):
        if self.materialized:
            return

        print('[Taichi] materializing...')
        self.create_program()

        def layout():
            for func in self.layout_functions:
                func()

        import taichi as ti
        ti.trace('Materializing layout...')
        taichi_lang_core.layout(layout)
        self.materialized = True
        for var in self.global_vars:
            if var.ptr.snode() is None:
                raise RuntimeError(
                    'Some variable(s) are not placed.'
                    ' Did you forget to specify the shape of any field? E.g., the "shape" argument in'
                    ' ti.field(dtype=ti.f32, shape=(3, 4))')
Example #8
    def iterate(self, i):
        tc.trace("Starting Iteration {}...".format(i))
        #if i > 10:
        #  self.general_action(action="set_step_limit", value=0.0)
        objective = float(self.general_action("iterate", iter=i))
        tc.trace("\n**** Task {}".format(self.task_id))
        #tc.trace("\n**** Iteration {} finished.\n*** (Maximum change = {:6.3})".format(i, changed))
        tc.trace(
            "\n**** Iteration {} finished.\n*** (objective = {:6.3f})".format(
                i, objective))
        # self.output(i + 1)
        tc.core.print_profile_info()

        if self.snapshot_period != 0 and i % self.snapshot_period == 0:
            self.general_action('save_state',
                                filename=self.get_snapshot_file_name(i))
        return objective
Example #9
    def materialize(self, key=None, args=None, arg_features=None):
        if impl.get_runtime().experimental_ast_refactor:
            return self.materialize_ast_refactor(key=key,
                                                 args=args,
                                                 arg_features=arg_features)
        _taichi_skip_traceback = 1
        if key is None:
            key = (self.func, 0)
        self.runtime.materialize()
        if key in self.compiled_functions:
            return
        grad_suffix = ""
        if self.is_grad:
            grad_suffix = "_grad"
        kernel_name = "{}_c{}_{}{}".format(self.func.__name__,
                                           self.kernel_counter, key[1],
                                           grad_suffix)
        ti.trace("Compiling kernel {}...".format(kernel_name))

        src = textwrap.dedent(oinspect.getsource(self.func))
        tree = ast.parse(src)

        func_body = tree.body[0]
        func_body.decorator_list = []

        local_vars = {}
        global_vars = _get_global_vars(self.func)

        for i, arg in enumerate(func_body.args.args):
            anno = arg.annotation
            if isinstance(anno, ast.Name):
                global_vars[anno.id] = self.argument_annotations[i]

        if isinstance(func_body.returns, ast.Name):
            global_vars[func_body.returns.id] = self.return_type

        if self.is_grad:
            KernelSimplicityASTChecker(self.func).visit(tree)

        visitor = ASTTransformerTotal(
            excluded_parameters=self.template_slot_locations,
            func=self,
            arg_features=arg_features)

        visitor.visit(tree)

        ast.increment_lineno(tree, oinspect.getsourcelines(self.func)[1] - 1)

        # inject template parameters into globals
        for i in self.template_slot_locations:
            template_var_name = self.argument_names[i]
            global_vars[template_var_name] = args[i]

        exec(
            compile(tree,
                    filename=oinspect.getsourcefile(self.func),
                    mode='exec'), global_vars, local_vars)
        compiled = local_vars[self.func.__name__]

        # Do not change the name of 'taichi_ast_generator'
        # The warning system needs this identifier to remove unnecessary messages
        def taichi_ast_generator():
            _taichi_skip_traceback = 1
            if self.runtime.inside_kernel:
                raise TaichiSyntaxError(
                    "Kernels cannot call other kernels. I.e., nested kernels are not allowed. Please check if you have direct/indirect invocation of kernels within kernels. Note that some methods provided by the Taichi standard library may invoke kernels, and please move their invocations to Python-scope."
                )
            self.runtime.inside_kernel = True
            self.runtime.current_kernel = self
            try:
                compiled()
            finally:
                self.runtime.inside_kernel = False
                self.runtime.current_kernel = None

        taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,
                                               kernel_name, self.is_grad)

        self.kernel_cpp = taichi_kernel

        assert key not in self.compiled_functions
        self.compiled_functions[key] = self.get_function_body(taichi_kernel)
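The example above turns the decorated Python function back into a callable by parsing its source, transforming the tree, then compiling and exec-ing it into a fresh namespace. A stripped-down sketch of that parse / compile / exec round trip using only the standard library (recompile is an illustrative name, not Taichi API):

    import ast
    import inspect
    import textwrap

    def recompile(func):
        src = textwrap.dedent(inspect.getsource(func))
        tree = ast.parse(src)
        tree.body[0].decorator_list = []  # drop decorators, as materialize() does
        # Align line numbers with the original file so tracebacks stay readable:
        ast.increment_lineno(tree, inspect.getsourcelines(func)[1] - 1)
        global_vars = dict(func.__globals__)
        local_vars = {}
        exec(
            compile(tree, filename=inspect.getsourcefile(func), mode='exec'),
            global_vars, local_vars)
        return local_vars[func.__name__]  # the freshly defined function object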
Example #10
def init(default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    if default_fp is None:  # won't override
        dfl_fp = os.environ.get("TI_DEFAULT_FP")
        if dfl_fp == '32':  # environment values are strings
            default_fp = core.DataType.f32
        elif dfl_fp == '64':
            default_fp = core.DataType.f64
        elif dfl_fp is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_FP: {dfl_fp}, should be 32 or 64')
    if default_ip is None:
        dfl_ip = os.environ.get("TI_DEFAULT_IP")
        if dfl_ip == '32':  # environment values are strings
            default_ip = core.DataType.i32
        elif dfl_ip == '64':
            default_ip = core.DataType.i64
        elif dfl_ip is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_IP: {dfl_ip}, should be 32 or 64')

    if print_preprocessed is None:  # won't override
        print_preprocessed = os.environ.get("TI_PRINT_PREPROCESSED")
        if print_preprocessed is not None:
            print_preprocessed = bool(int(print_preprocessed))

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed

    if debug is None:
        debug = bool(int(os.environ.get('TI_DEBUG', '0')))
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    unified_memory = os.environ.get('TI_USE_UNIFIED_MEMORY', '')
    if unified_memory != '':
        use_unified_memory = bool(int(unified_memory))
        ti.cfg.use_unified_memory = use_unified_memory
        if not use_unified_memory:
            ti.trace(
                'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
            )

    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)

    def boolean_config(key, name=None):
        if name is None:
            name = 'TI_' + key.upper()
        value = os.environ.get(name)
        if value is not None:
            setattr(ti.cfg, key, len(value) and bool(int(value)))

    # does override
    boolean_config("print_ir")
    boolean_config("verbose")
    boolean_config("fast_math")
    arch = os.environ.get("TI_ARCH")
    if arch is not None:
        ti.cfg.arch = ti.core.arch_from_name(arch)

    log_level = os.environ.get("TI_LOG_LEVEL")
    if log_level is not None:
        ti.set_logging_level(log_level.lower())

    ti.get_runtime().create_program()
Example #11
    def materialize(self, key=None, args=None, arg_features=None):
        if key is None:
            key = (self.func, 0)
        if not self.runtime.materialized:
            self.runtime.materialize()
        if key in self.compiled_functions:
            return
        grad_suffix = ""
        if self.is_grad:
            grad_suffix = "_grad"
        kernel_name = "{}_c{}_{}{}".format(self.func.__name__,
                                           self.kernel_counter, key[1],
                                           grad_suffix)
        import taichi as ti
        ti.trace("Compiling kernel {}...".format(kernel_name))

        src = remove_indent(inspect.getsource(self.func))
        tree = ast.parse(src)
        if self.runtime.print_preprocessed:
            import astor
            print('Before preprocessing:')
            print(astor.to_source(tree.body[0]))

        func_body = tree.body[0]
        func_body.decorator_list = []

        local_vars = {}
        # Discussions: https://github.com/yuanming-hu/taichi/issues/282
        import copy
        global_vars = copy.copy(self.func.__globals__)

        for i, arg in enumerate(func_body.args.args):
            anno = arg.annotation
            if isinstance(anno, ast.Name):
                global_vars[anno.id] = self.arguments[i]

        if self.is_grad:
            from .ast_checker import KernelSimplicityASTChecker
            KernelSimplicityASTChecker(self.func).visit(tree)

        visitor = ASTTransformer(
            excluded_parameters=self.template_slot_locations,
            func=self,
            arg_features=arg_features)

        visitor.visit(tree)
        ast.fix_missing_locations(tree)

        if self.runtime.print_preprocessed:
            import astor
            print('After preprocessing:')
            print(astor.to_source(tree.body[0], indent_with='  '))

        ast.increment_lineno(tree, inspect.getsourcelines(self.func)[1] - 1)

        freevar_names = self.func.__code__.co_freevars
        closure = self.func.__closure__
        if closure:
            freevar_values = list(map(lambda x: x.cell_contents, closure))
            for name, value in zip(freevar_names, freevar_values):
                global_vars[name] = value

        # inject template parameters into globals
        for i in self.template_slot_locations:
            template_var_name = self.argument_names[i]
            global_vars[template_var_name] = args[i]

        exec(
            compile(tree,
                    filename=inspect.getsourcefile(self.func),
                    mode='exec'), global_vars, local_vars)
        compiled = local_vars[self.func.__name__]

        taichi_kernel = taichi_lang_core.create_kernel(kernel_name,
                                                       self.is_grad)

        # Do not change the name of 'taichi_ast_generator'
        # The warning system needs this identifier to remove unnecessary messages
        def taichi_ast_generator():
            if self.runtime.inside_kernel:
                import taichi as ti
                raise ti.TaichiSyntaxError(
                    "Kernels cannot call other kernels. I.e., nested kernels are not allowed. Please check if you have direct/indirect invocation of kernels within kernels. Note that some methods provided by the Taichi standard library may invoke kernels, and please move their invocations to Python-scope."
                )
            self.runtime.inside_kernel = True
            compiled()
            self.runtime.inside_kernel = False

        taichi_kernel = taichi_kernel.define(taichi_ast_generator)

        assert key not in self.compiled_functions
        self.compiled_functions[key] = self.get_function_body(taichi_kernel)
Example #12
def init(arch=None,
         default_fp=None,
         default_ip=None,
         print_preprocessed=None,
         debug=None,
         **kwargs):
    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    import taichi as ti
    ti.reset()

    if default_fp is None:  # won't override
        dfl_fp = os.environ.get("TI_DEFAULT_FP")
        if dfl_fp == '32':  # environment values are strings
            default_fp = core.DataType.f32
        elif dfl_fp == '64':
            default_fp = core.DataType.f64
        elif dfl_fp is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_FP: {dfl_fp}, should be 32 or 64')
    if default_ip is None:
        dfl_ip = os.environ.get("TI_DEFAULT_IP")
        if dfl_ip == '32':  # environment values are strings
            default_ip = core.DataType.i32
        elif dfl_ip == '64':
            default_ip = core.DataType.i64
        elif dfl_ip is not None:
            raise ValueError(
                f'Unrecognized TI_DEFAULT_IP: {dfl_ip}, should be 32 or 64')

    if print_preprocessed is None:  # won't override
        print_preprocessed = os.environ.get("TI_PRINT_PREPROCESSED")
        if print_preprocessed is not None:
            print_preprocessed = bool(int(print_preprocessed))

    if default_fp is not None:
        ti.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        ti.get_runtime().set_default_ip(default_ip)
    if print_preprocessed is not None:
        ti.get_runtime().print_preprocessed = print_preprocessed

    if debug is None:
        debug = bool(int(os.environ.get('TI_DEBUG', '0')))
    if debug:
        ti.set_logging_level(ti.TRACE)
    ti.cfg.debug = debug

    unified_memory = os.environ.get('TI_USE_UNIFIED_MEMORY', '')
    if unified_memory != '':
        use_unified_memory = bool(int(unified_memory))
        ti.cfg.use_unified_memory = use_unified_memory
        if not use_unified_memory:
            ti.trace(
                'Unified memory disabled (env TI_USE_UNIFIED_MEMORY=0). This is experimental.'
            )

    for k, v in kwargs.items():
        setattr(ti.cfg, k, v)

    def bool_int(x):
        return bool(int(x))

    def environ_config(key, cast=bool_int):
        name = 'TI_' + key.upper()
        value = os.environ.get(name, '')
        if len(value):
            setattr(ti.cfg, key, cast(value))

        # TI_ASYNC=   : empty value, ignored
        # TI_ASYNC=0  : False
        # TI_ASYNC=1  : True

    # does override
    environ_config("print_ir")
    environ_config("verbose")
    environ_config("fast_math")
    environ_config("async")
    environ_config("print_benchmark_stat")
    environ_config("device_memory_fraction", float)
    environ_config("device_memory_GB", float)

    # Q: Why not environ_config("gdb_trigger")?
    # A: We don't have ti.cfg.gdb_trigger yet.
    # Discussion: https://github.com/taichi-dev/taichi/pull/879
    gdb_trigger = os.environ.get('TI_GDB_TRIGGER', '')
    if len(gdb_trigger):
        ti.set_gdb_trigger(bool(int(gdb_trigger)))

    # Q: Why not environ_config("arch", ti.core.arch_from_name)?
    # A: We need adaptive_arch_select for all.
    env_arch = os.environ.get("TI_ARCH")
    if env_arch is not None:
        print(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = ti.core.arch_from_name(env_arch)

    ti.cfg.arch = adaptive_arch_select(arch)

    log_level = os.environ.get("TI_LOG_LEVEL")
    if log_level is not None:
        ti.set_logging_level(log_level.lower())

    ti.get_runtime().create_program()
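The environ_config helper above reads TI_ plus the upper-cased key and skips empty values, as its inline comment notes. A hedged usage sketch (variable names follow that convention; the variables must be set before ti.init() runs so the overrides are picked up):

    import os

    # Boolean options take '0'/'1'; an empty string is ignored:
    os.environ['TI_PRINT_IR'] = '1'          # -> ti.cfg.print_ir = True
    os.environ['TI_FAST_MATH'] = '0'         # -> ti.cfg.fast_math = False
    # Options registered with a cast go through it, e.g. float here:
    os.environ['TI_DEVICE_MEMORY_GB'] = '2'  # -> ti.cfg.device_memory_GB = 2.0

    import taichi as ti
    ti.init(arch=ti.cpu)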
Example #13
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         **kwargs):

    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    ti.reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move this stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            _ti_core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = ti.f32
        elif env_default_fp == '64':
            default_fp = ti.f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            _ti_core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = ti.i32
        elif env_default_ip == '64':
            default_ip = ti.i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        impl.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        impl.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('print_preprocessed')
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('excepthook')
    env_spec.add('experimental_real_function')

    # compiler configurations (ti.cfg):
    for key in dir(ti.cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        cast = type(getattr(ti.cfg, key))
        if cast is bool:
            cast = None
        env_comp.add(key, cast)

    unexpected_keys = kwargs.keys()
    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        ti.set_gdb_trigger(spec_cfg.gdb_trigger)
        impl.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
        impl.get_runtime().experimental_real_function = \
            spec_cfg.experimental_real_function
        ti.set_logging_level(spec_cfg.log_level.lower())
        if spec_cfg.excepthook:
            # TODO(#1405): add a way to restore old excepthook
            ti.enable_excepthook()

    # select arch (backend):
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        ti.info(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = _ti_core.arch_from_name(env_arch)
    ti.cfg.arch = adaptive_arch_select(arch)
    print(f'[Taichi] Starting on arch={_ti_core.arch_name(ti.cfg.arch)}')

    if _test_mode:
        return spec_cfg

    # create a new program:
    impl.get_runtime().create_program()

    ti.trace('Materializing runtime...')
    impl.get_runtime().prog.materialize_runtime()

    impl._root_fb = FieldsBuilder()
Example #14
  def __init__(self, snapshot_interval=20, **kwargs):
    res = kwargs['res']
    self.frame_dt = kwargs.get('frame_dt', 0.01)
    if 'frame_dt' not in kwargs:
      kwargs['frame_dt'] = self.frame_dt
    self.num_frames = kwargs.get('num_frames', 1000)
    if len(res) == 2:
      self.c = tc_core.create_simulation2('mpm')
      self.Vector = tc_core.Vector2f
      self.Vectori = tc_core.Vector2i
    else:
      self.c = tc_core.create_simulation3('mpm')
      self.Vector = tc_core.Vector3f
      self.Vectori = tc_core.Vector3i
    
    self.snapshot_interval = snapshot_interval

    if 'task_id' in kwargs:
      self.task_id = kwargs['task_id']
    else:
      self.task_id = sys.argv[0].split('.')[0]
    if 'delta_x' not in kwargs:
      kwargs['delta_x'] = 1.0 / res[0]
      
    print('delta_x = {}'.format(kwargs['delta_x']))
    print('task_id = {}'.format(self.task_id))
    
    self.directory = tc.get_output_path('mpm/' + self.task_id, True)
    self.snapshot_directory = os.path.join(self.directory, 'snapshots')
    self.video_manager = VideoManager(self.directory)
    kwargs['frame_directory'] = self.video_manager.get_frame_directory()
    
    self.log_fn = os.path.join(self.directory, 'log.txt')
    tc.duplicate_stdout_to_file(self.log_fn)
    tc.redirect_print_to_log()
    tc.trace("log_fn = {}", self.log_fn)

    try:
      opts, args = getopt.getopt(sys.argv[1:], 'c:d:', ['continue=', 'dt-multiplier='])
    except getopt.GetoptError as err:
      print(err)
      # TODO: output usage here
      sys.exit()
    self.continue_opt = False
    self.continue_frame = ''
    for o, a in opts:
      if o in ('--continue', '-c'):
        print('clear_output_directory is disabled with --continue.')
        self.continue_opt = True
        self.continue_frame = int(a)
      elif o in ('--dt-multiplier', '-d'):
        kwargs['dt-multiplier'] = float(a)

    self.c.initialize(P(**kwargs))
    self.check_directory(self.directory)
    self.check_directory(self.snapshot_directory)
    vis_res = self.c.get_vis_resolution()
    self.video_manager.width = vis_res.x
    self.video_manager.height = vis_res.y
    self.particle_renderer = ParticleRenderer(
        'shadow_map',
        shadow_map_resolution=0.3,
        alpha=0.7,
        shadowing=2,
        ambient_light=0.01,
        light_direction=(1, 1, 0))
    self.res = kwargs['res']
    self.c.frame = 0

    dummy_levelset = self.create_levelset()

    def dummy_levelset_generator(_):
      return dummy_levelset

    self.levelset_generator = dummy_levelset_generator
    self.start_simulation_time = None
    self.simulation_total_time = None
    self.visualize_count = 0
    self.visualize_count_limit = 400000.0
Example #15
def init(arch=None,
         default_fp=None,
         default_ip=None,
         _test_mode=False,
         enable_fallback=True,
         **kwargs):
    """Initializes the Taichi runtime.

    This should always be the entry point of your Taichi program. Most
    importantly, it sets the backend used throughout the program.

    Args:
        arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or :const:`~taichi.lang.gpu`.
        default_fp (Optional[type]): Default floating-point type.
        default_ip (Optional[type]): Default integral type.
        **kwargs: Taichi provides highly customizable compilation through
            ``kwargs``, which allows for fine grained control of Taichi compiler
            behavior. Below we list some of the most frequently used ones. For a
            complete list, please check out
            https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h.

            * ``cpu_max_num_threads`` (int): Sets the number of threads used by the CPU thread pool.
            * ``debug`` (bool): Enables the debug mode, under which Taichi does a few more things like boundary checks.
            * ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels.
            * ``packed`` (bool): Enables the packed memory layout. See https://docs.taichi.graphics/lang/articles/advanced/layout.
    """
    # Check version for users every 7 days if not disabled by users.
    skip = os.environ.get("TI_SKIP_VERSION_CHECK")
    if skip != 'ON':
        try_check_version()

    # Make a deepcopy in case these args reference to items from ti.cfg, which are
    # actually references. If no copy is made and the args are indeed references,
    # ti.reset() could override the args to their default values.
    default_fp = _deepcopy(default_fp)
    default_ip = _deepcopy(default_ip)
    kwargs = _deepcopy(kwargs)
    ti.reset()

    spec_cfg = _SpecialConfig()
    env_comp = _EnvironmentConfigurator(kwargs, ti.cfg)
    env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)

    # configure default_fp/ip:
    # TODO: move this stuff to _SpecialConfig too:
    env_default_fp = os.environ.get("TI_DEFAULT_FP")
    if env_default_fp:
        if default_fp is not None:
            _ti_core.warn(
                f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
            )
        if env_default_fp == '32':
            default_fp = ti.f32
        elif env_default_fp == '64':
            default_fp = ti.f64
        elif env_default_fp is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')

    env_default_ip = os.environ.get("TI_DEFAULT_IP")
    if env_default_ip:
        if default_ip is not None:
            _ti_core.warn(
                f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
            )
        if env_default_ip == '32':
            default_ip = ti.i32
        elif env_default_ip == '64':
            default_ip = ti.i64
        elif env_default_ip is not None:
            raise ValueError(
                f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')

    if default_fp is not None:
        impl.get_runtime().set_default_fp(default_fp)
    if default_ip is not None:
        impl.get_runtime().set_default_ip(default_ip)

    # submodule configurations (spec_cfg):
    env_spec.add('print_preprocessed')
    env_spec.add('log_level', str)
    env_spec.add('gdb_trigger')
    env_spec.add('excepthook')
    env_spec.add('experimental_real_function')
    env_spec.add('short_circuit_operators')

    # compiler configurations (ti.cfg):
    for key in dir(ti.cfg):
        if key in ['arch', 'default_fp', 'default_ip']:
            continue
        _cast = type(getattr(ti.cfg, key))
        if _cast is bool:
            _cast = None
        env_comp.add(key, _cast)

    unexpected_keys = kwargs.keys()

    if len(unexpected_keys):
        raise KeyError(
            f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
        )

    # dispatch configurations that are not in ti.cfg:
    if not _test_mode:
        ti.set_gdb_trigger(spec_cfg.gdb_trigger)
        impl.get_runtime().print_preprocessed = spec_cfg.print_preprocessed
        impl.get_runtime().experimental_real_function = \
            spec_cfg.experimental_real_function
        impl.get_runtime().short_circuit_operators = \
            spec_cfg.short_circuit_operators
        ti.set_logging_level(spec_cfg.log_level.lower())
        if spec_cfg.excepthook:
            # TODO(#1405): add a way to restore old excepthook
            ti.enable_excepthook()

    # select arch (backend):
    env_arch = os.environ.get('TI_ARCH')
    if env_arch is not None:
        ti.info(f'Following TI_ARCH setting up for arch={env_arch}')
        arch = _ti_core.arch_from_name(env_arch)
    ti.cfg.arch = adaptive_arch_select(arch, enable_fallback, ti.cfg.use_gles)
    if ti.cfg.arch == cc:
        _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
    print(f'[Taichi] Starting on arch={_ti_core.arch_name(ti.cfg.arch)}')

    # Torch based ndarray on opengl backend allocates memory on host instead of opengl backend.
    # So it won't work.
    if ti.cfg.arch == opengl and ti.cfg.ndarray_use_torch:
        ti.warn(
            'Opengl backend doesn\'t support torch based ndarray. Setting ndarray_use_torch to False.'
        )
        ti.cfg.ndarray_use_torch = False

    if _test_mode:
        return spec_cfg

    get_default_kernel_profiler().set_kernel_profiler_mode(
        ti.cfg.kernel_profiler)

    # create a new program:
    impl.get_runtime().create_program()

    ti.trace('Materializing runtime...')
    impl.get_runtime().prog.materialize_runtime()

    impl._root_fb = FieldsBuilder()

    if not os.environ.get("TI_DISABLE_SIGNAL_HANDLERS", False):
        impl.get_runtime()._register_signal_handlers()

    return None
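The docstring in this example spells out the most common ti.init options; a minimal usage sketch built only from the arguments it documents:

    import taichi as ti

    # debug enables extra checks such as bounds checking;
    # cpu_max_num_threads caps the CPU thread pool.
    ti.init(arch=ti.cpu,
            default_fp=ti.f32,
            debug=True,
            cpu_max_num_threads=4)

Setting the environment variable TI_SKIP_VERSION_CHECK=ON (read at the top of init) disables the seven-day version check.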
Example #16
    def __init__(self, **kwargs):
        res = kwargs['res']
        self.script_name = sys.argv[0].split('.')[0]
        assert (self.script_name.startswith('opt_'))
        self.script_name = self.script_name[4:]
        self.snapshot_period = kwargs.get('snapshot_period', 0)
        script_fn = os.path.join(os.getcwd(), sys.argv[0])

        suffix = ''

        self.version = kwargs.get('version', 0)
        if 'version' in kwargs:
            suffix += '_v{:0d}'.format(int(self.version))

        self.wireframe = kwargs.get('wireframe', False)
        if 'wireframe' in kwargs:
            if 'wireframe_grid_size' not in kwargs:
                kwargs['wireframe_grid_size'] = 10
            if 'wireframe_thickness' not in kwargs:
                kwargs['wireframe_thickness'] = 3
            if self.wireframe:
                suffix += '_wf{}g{}t{}'.format(int(self.wireframe),
                                               kwargs['wireframe_grid_size'],
                                               kwargs['wireframe_thickness'])
            else:
                suffix += '_wf{}'.format(int(self.wireframe))

        suffix += '_r{:04d}'.format(res[0])

        parser = argparse.ArgumentParser(description='Topology Optimization.')
        parser.add_argument('options',
                            metavar='Option',
                            type=str,
                            nargs='*',
                            help='An option to override')
        parser.add_argument('-c', type=str, help='iteration to start from')

        args = parser.parse_args()

        if args.c is not None:
            suffix += '_continue'

        self.task_id = get_unique_task_id()
        self.suffix = suffix + kwargs.get('suffix', '')

        self.working_directory = os.path.join(tc.get_output_directory(),
                                              'topo_opt', self.script_name,
                                              self.task_id + '_' + self.suffix)
        kwargs['working_directory'] = self.working_directory
        self.snapshot_directory = os.path.join(self.working_directory,
                                               'snapshots')
        self.fem_directory = os.path.join(self.working_directory, 'fem')
        self.fem_obj_directory = os.path.join(self.working_directory,
                                              'fem_obj')

        os.makedirs(self.snapshot_directory, exist_ok=True)
        os.makedirs(self.fem_directory, exist_ok=True)
        os.makedirs(self.fem_obj_directory, exist_ok=True)
        self.max_iterations = kwargs.get('max_iterations', 1000)

        self.log_fn = os.path.join(self.working_directory, 'log.txt')
        tc.start_memory_monitoring(os.path.join(self.working_directory,
                                                'memory_usage.txt'),
                                   interval=0.1)
        tc.duplicate_stdout_to_file(self.log_fn)
        tc.redirect_print_to_log()
        tc.trace("log_fn = {}", self.log_fn)

        with open(script_fn) as f:
            script_content = f.read()

        shutil.copy(sys.argv[0], self.working_directory + "/")
        tc.info("Script backuped")
        tc.info(
            "Script content:\n********************\n\n{}\n*******************",
            script_content)
        print(args)
        super().__init__(name='spgrid_topo_opt', **kwargs)

        if args.c is not None:
            # Restart (continue)
            print(args.options)
            print(args.c)

            last_iter = self.general_action(
                action='load_state',
                #filename=self.get_snapshot_file_name(args.c))
                filename=args.c)
            for o in args.options:
                o = o.split('=')
                assert (len(o) == 2)
                self.override_parameter(o[0], o[1])

            self.i_start = int(last_iter) + 1
            tc.info("\n*** Restarting from iter {}", self.i_start)
            self.run()
            exit()

        # Start from scratch
        self.i_start = 0

        tc.trace("log duplicated")
        if kwargs.get('check_log_file', True):
            assert (os.path.exists(self.log_fn))