@ti.kernel
def ext_arr_to_matrix(arr: ext_arr(), mat: template(), as_vector: template()):
    for I in ti.grouped(mat):
        for p in ti.static(range(mat.n)):
            for q in ti.static(range(mat.m)):
                if ti.static(as_vector):
                    mat[I][p] = arr[I, p]
                else:
                    mat[I][p, q] = arr[I, p, q]

def extract_arguments(self):
    sig = inspect.signature(self.func)
    if sig.return_annotation not in (inspect._empty, None):
        self.return_type = sig.return_annotation
    params = sig.parameters
    arg_names = params.keys()
    for i, arg_name in enumerate(arg_names):
        param = params[arg_name]
        if param.kind == inspect.Parameter.VAR_KEYWORD:
            raise KernelDefError(
                'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'
            )
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            raise KernelDefError(
                'Taichi kernels do not support variable positional parameters (i.e., *args)'
            )
        if param.default is not inspect.Parameter.empty:
            raise KernelDefError(
                'Taichi kernels do not support default values for arguments')
        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            raise KernelDefError(
                'Taichi kernels do not support keyword parameters')
        if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
            raise KernelDefError(
                'Taichi kernels only support "positional or keyword" parameters'
            )
        annotation = param.annotation
        if param.annotation is inspect.Parameter.empty:
            if i == 0 and self.classkernel:  # The |self| parameter
                annotation = template()
            else:
                _taichi_skip_traceback = 1
                raise KernelDefError(
                    'Taichi kernel parameters must be type annotated')
        else:
            if isinstance(annotation, (template, ext_arr, any_arr)):
                pass
            elif id(annotation) in primitive_types.type_ids:
                pass
            elif isinstance(annotation, sparse_matrix_builder):
                pass
            else:
                _taichi_skip_traceback = 1
                raise KernelDefError(
                    f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'
                )
        self.argument_annotations.append(annotation)
        self.argument_names.append(param.name)

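# Illustrative sketch (not part of the library): how the annotation rules
# enforced by the kernel-side extract_arguments() above surface to a user.
# The field and kernel names below are hypothetical examples; this assumes
# a working Taichi installation.
import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(dtype=ti.f32, shape=8)


@ti.kernel
def scale(factor: ti.f32):  # OK: positional, type-annotated parameter
    for i in x:
        x[i] *= factor


scale(2.0)

# Each of the following definitions would raise KernelDefError when the
# @ti.kernel decorator processes the signature, per the checks above:
#   def scale(factor): ...                 # missing type annotation
#   def scale(factor: ti.f32 = 1.0): ...   # default values not supported
#   def scale(*args: ti.f32): ...          # *args not supported
#   def scale(**kwargs): ...               # **kwargs not supported
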
def extract_arguments(self):
    sig = inspect.signature(self.func)
    if sig.return_annotation not in (inspect._empty, None):
        self.return_type = sig.return_annotation
    params = sig.parameters
    arg_names = params.keys()
    for i, arg_name in enumerate(arg_names):
        param = params[arg_name]
        if param.kind == inspect.Parameter.VAR_KEYWORD:
            raise KernelDefError(
                'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'
            )
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            raise KernelDefError(
                'Taichi functions do not support variable positional parameters (i.e., *args)'
            )
        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            raise KernelDefError(
                'Taichi functions do not support keyword parameters')
        if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
            raise KernelDefError(
                'Taichi functions only support "positional or keyword" parameters'
            )
        annotation = param.annotation
        if annotation is inspect.Parameter.empty:
            if i == 0 and self.classfunc:
                annotation = template()
        else:
            if id(annotation) in primitive_types.type_ids:
                ti.warning(
                    'Data type annotations are unnecessary for Taichi'
                    ' functions, consider removing them',
                    stacklevel=4)
            elif not isinstance(annotation, template):
                raise KernelDefError(
                    f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'
                )
        self.arguments.append(annotation)
        self.argument_names.append(param.name)

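# Illustrative sketch (not part of the library): unlike kernels, Taichi
# functions infer argument types at the call site, so the function-side
# extract_arguments() above only warns about primitive-type annotations
# instead of requiring them. The names below are hypothetical examples.
import taichi as ti

ti.init(arch=ti.cpu)


@ti.func
def triple(v):  # OK: no annotation needed for a Taichi function
    return v * 3


@ti.kernel
def run() -> ti.f32:
    return triple(2.0)


print(run())  # 6.0

# `def triple(v: ti.f32)` would still compile but emit the warning above.
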
@ti.kernel
def vector_to_fast_image(img: template(), out: ext_arr()):
    # FIXME: Why is ``for i, j in img:`` slower than:
    for i, j in ti.ndrange(*img.shape):
        r, g, b = 0, 0, 0
        color = img[i, img.shape[1] - 1 - j]
        if ti.static(img.dtype in [ti.f32, ti.f64]):
            r, g, b = min(255, max(0, int(color * 255)))
        else:
            impl.static_assert(img.dtype == ti.u8)
            r, g, b = color
        idx = j * img.shape[0] + i
        # We use i32 for |out| since OpenGL and Metal don't support u8 types.
        if ti.static(settings.get_os_name() != 'osx'):
            out[idx] = (r << 16) + (g << 8) + b
        else:
            # What's -16777216?
            #
            # On macOS the GUI needs the alpha channel set to 0xff and the
            # color stored in ABGR order, so we have to add 0xff000000, which
            # is -16777216 in i32's legit range. (Adding the literal
            # 0xff000000 would be clearer, but it doesn't fit in i32 and
            # doesn't work.)
            alpha = -16777216
            out[idx] = (b << 16) + (g << 8) + r + alpha

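# Quick sanity check of the -16777216 trick above (plain Python, illustrative
# only): -16777216 has the same 32-bit two's-complement bit pattern as
# 0xff000000, so adding it to a packed 0x00BBGGRR value sets the alpha byte
# to 0xff while staying inside the signed i32 range.
assert -16777216 & 0xFFFFFFFF == 0xFF000000

r, g, b = 0x11, 0x22, 0x33
packed = (b << 16) + (g << 8) + r - 16777216
assert packed & 0xFFFFFFFF == 0xFF332211
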
@ti.kernel
def ext_arr_to_tensor(arr: ext_arr(), tensor: template()):
    for I in ti.grouped(tensor):
        tensor[I] = arr[I]

@ti.kernel
def tensor_to_tensor(tensor: template(), other: template()):
    for I in ti.grouped(tensor):
        tensor[I] = other[I]

@ti.kernel
def vector_to_image(mat: template(), arr: ext_arr()):
    for I in ti.grouped(mat):
        for p in ti.static(range(mat.n)):
            arr[I, p] = ti.cast(mat[I][p], ti.f32)
            if ti.static(mat.n <= 2):
                arr[I, 2] = 0

@ti.kernel
def tensor_to_image(tensor: template(), arr: ext_arr()):
    for I in ti.grouped(tensor):
        t = ti.cast(tensor[I], ti.f32)
        arr[I, 0] = t
        arr[I, 1] = t
        arr[I, 2] = t

@ti.kernel
def snode_deactivate_dynamic(b: template()):
    for I in ti.grouped(b.parent()):
        ti.deactivate(b, I)

@ti.kernel
def fill_tensor(tensor: template(), val: template()):
    for I in ti.grouped(tensor):
        tensor[I] = val

@ti.kernel
def snode_deactivate(b: template()):
    for I in ti.grouped(b):
        ti.deactivate(b, I)

@ti.kernel
def fill_matrix(mat: template(), vals: template()):
    for I in ti.grouped(mat):
        for p in ti.static(range(mat.n)):
            for q in ti.static(range(mat.m)):
                mat[I][p, q] = vals[p][q]

@ti.kernel
def clear_loss(l: template()):
    # Using SNode writers would result in a forced sync, so we wrap these
    # writes in a kernel.
    l[None] = 0
    l.grad[None] = 1

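# Illustrative usage sketch (not part of the library): resetting the loss and
# seeding its gradient with one kernel launch before a reverse-mode pass.
# Assumes `clear_loss` above is in scope; the field and kernel names are
# hypothetical.
import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(dtype=ti.f32, shape=16, needs_grad=True)
loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)


@ti.kernel
def compute_loss():
    for i in x:
        loss[None] += x[i] ** 2


clear_loss(loss)      # one launch: loss <- 0, loss.grad <- 1
compute_loss()
compute_loss.grad()   # x.grad now holds d(loss)/dx
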
@ti.kernel
def clear_gradients(vars: template()):
    for I in ti.grouped(ScalarField(Expr(vars[0]))):
        for s in ti.static(vars):
            ScalarField(Expr(s))[I] = 0

@ti.kernel
def to_torch_template(expr: template(), torch_tensor: ext_arr()):
    for i in expr:
        torch_tensor[i] = expr[i]

@ti.kernel
def from_torch_template(expr: template(), torch_tensor: ext_arr()):
    for i in expr:
        expr[i] = torch_tensor[i]

@ti.kernel
def tensor_to_ext_arr(tensor: template(), arr: ext_arr()):
    for I in ti.grouped(tensor):
        arr[I] = tensor[I]

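# Illustrative usage sketch (not part of the library): copying a field into a
# NumPy array with the kernel above, which is essentially what a
# to_numpy()-style conversion does under the hood. Assumes `tensor_to_ext_arr`
# is in scope; the field name is a hypothetical example.
import numpy as np
import taichi as ti

ti.init(arch=ti.cpu)

f = ti.field(dtype=ti.f32, shape=(4, 4))
f.fill(1.0)

arr = np.empty((4, 4), dtype=np.float32)
tensor_to_ext_arr(f, arr)  # arr now mirrors the contents of f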