def matrix_to_ext_arr(mat: template(), arr: ext_arr(), as_vector: template()):
    """Copy a matrix (or vector) field into an external array.

    With ``as_vector`` statically true, each element is written as
    ``arr[cell, row]``; otherwise as ``arr[cell, row, col]``.
    """
    for cell in ti.grouped(mat):
        # Both element-dimension loops are unrolled at compile time.
        for row in ti.static(range(mat.n)):
            for col in ti.static(range(mat.m)):
                if ti.static(as_vector):
                    arr[cell, row] = mat[cell][row]
                else:
                    arr[cell, row, col] = mat[cell][row, col]
def gen_normals_kernel(vertices: template(), normals: template()):
    """Compute per-vertex normals for a triangle soup.

    Each consecutive triplet of ``vertices`` forms one triangle; all three
    of its vertices receive the triangle's unit face normal.

    Args:
        vertices: field of 3D positions; length assumed to be a multiple of 3.
        normals: field of the same length receiving the computed normals.
    """
    N = vertices.shape[0]
    # BUG FIX: `N / 3` is true division and yields a float, which is not a
    # valid `range()` bound — use floor division instead.
    for i in range(N // 3):
        a = vertices[i * 3]
        b = vertices[i * 3 + 1]
        c = vertices[i * 3 + 2]
        # Face normal from two edge vectors. NOTE(review): a degenerate
        # (zero-area) triangle would normalize a zero vector here.
        n = (a - b).cross(a - c).normalized()
        normals[i * 3] = n
        normals[i * 3 + 1] = n
        normals[i * 3 + 2] = n
def ndarray_matrix_to_ext_arr(ndarray: any_arr(), arr: ext_arr(), as_vector: template()):
    """Copy a matrix (or vector) ndarray into an external array.

    Mirrors ``matrix_to_ext_arr`` but reads from an ndarray argument;
    ``as_vector`` statically selects 1-index vs 2-index element layout.
    """
    for cell in ti.grouped(ndarray):
        # Element dimensions are compile-time constants: unrolled statically.
        for row in ti.static(range(ndarray[cell].n)):
            for col in ti.static(range(ndarray[cell].m)):
                if ti.static(as_vector):
                    arr[cell, row] = ndarray[cell][row]
                else:
                    arr[cell, row, col] = ndarray[cell][row, col]
def gen_normals_kernel_indexed(vertices: template(), indices: template(), normals: template(), weights: template()):
    """Compute smooth per-vertex normals for an indexed triangle mesh.

    Face normals are accumulated onto each referenced vertex (with a unit
    weight per incident triangle) and then averaged.

    Args:
        vertices: field of 3D vertex positions.
        indices: flat field of vertex indices; length a multiple of 3.
        normals: per-vertex output field for the averaged normals.
        weights: per-vertex scratch field counting incident triangles.
    """
    # BUG FIX: `/ 3` is true division and yields a float, which is not a
    # valid `range()` bound — use floor division instead.
    num_triangles = indices.shape[0] // 3
    num_vertices = vertices.shape[0]
    # Reset accumulators before the scatter pass.
    for i in range(num_vertices):
        normals[i] = Vector([0.0, 0.0, 0.0])
        weights[i] = 0.0
    for i in range(num_triangles):
        i_a = indices[i * 3]
        i_b = indices[i * 3 + 1]
        i_c = indices[i * 3 + 2]
        a = vertices[i_a]
        b = vertices[i_b]
        c = vertices[i_c]
        n = (a - b).cross(a - c).normalized()
        # Atomic adds: several triangles may touch the same vertex in parallel.
        atomic_add(normals[i_a], n)
        atomic_add(normals[i_b], n)
        atomic_add(normals[i_c], n)
        atomic_add(weights[i_a], 1.0)
        atomic_add(weights[i_b], 1.0)
        atomic_add(weights[i_c], 1.0)
    # Average; vertices referenced by no triangle keep a zero normal.
    for i in range(num_vertices):
        if weights[i] > 0.0:
            normals[i] = normals[i] / weights[i]
def vector_to_fast_image(img: template(), out: ext_arr()):
    """Pack an RGB image field into a flat i32 pixel buffer for fast GUI display.

    The image is flipped vertically (GUI origin vs. field origin) and each
    pixel is packed into one i32; the channel order and alpha handling are
    platform-dependent (see the macOS branch below).
    """
    # FIXME: Why is ``for i, j in img:`` slower than:
    for i, j in ti.ndrange(*img.shape):
        r, g, b = 0, 0, 0
        # Flip vertically: row 0 of the output is the top of the image.
        color = img[i, img.shape[1] - 1 - j]
        if ti.static(img.dtype in [ti.f32, ti.f64]):
            # Float images: scale [0, 1] to [0, 255] and clamp per channel.
            r, g, b = min(255, max(0, int(color * 255)))
        else:
            impl.static_assert(img.dtype == ti.u8)
            r, g, b = color
        idx = j * img.shape[0] + i
        # We use i32 for |out| since OpenGL and Metal doesn't support u8 types
        if ti.static(get_os_name() != 'osx'):
            # 0x00RRGGBB packing.
            out[idx] = (r << 16) + (g << 8) + b
        else:
            # What's -16777216?
            #
            # On Mac, we need to set the alpha channel to 0xff. Since Mac's GUI
            # is big-endian, the color is stored in ABGR order, and we need to
            # add 0xff000000, which is -16777216 in I32's legit range. (Albeit
            # the clarity, adding 0xff000000 doesn't work.)
            alpha = -16777216
            out[idx] = (b << 16) + (g << 8) + r + alpha
def tensor_to_image(tensor: template(), arr: ext_arr()):
    """Broadcast a scalar field into a grayscale RGB image array."""
    for cell in ti.grouped(tensor):
        gray = ti.cast(tensor[cell], ti.f32)
        # Replicate the scalar into the R, G and B channels (unrolled).
        for chan in ti.static(range(3)):
            arr[cell, chan] = gray
def tensor_to_ext_arr(tensor: template(), arr: ext_arr()):
    """Element-wise copy of a scalar field into an external array."""
    for cell in ti.grouped(tensor):
        arr[cell] = tensor[cell]
def fill_ndarray(ndarray: any_arr(), val: template()):
    """Set every element of a scalar ndarray to ``val``."""
    for cell in ti.grouped(ndarray):
        ndarray[cell] = val
def to_torch_template(expr: template(), torch_tensor: ext_arr()):
    """Copy a field into a torch tensor, element by element."""
    for pos in expr:
        torch_tensor[pos] = expr[pos]
def ext_arr_to_tensor(arr: ext_arr(), tensor: template()):
    """Element-wise copy of an external array into a scalar field."""
    for cell in ti.grouped(tensor):
        tensor[cell] = arr[cell]
def fill_tensor(tensor: template(), val: template()):
    """Set every element of a scalar field to ``val``."""
    for cell in ti.grouped(tensor):
        tensor[cell] = val
def clear_loss(l: template()):
    """Reset a 0-D loss field to 0 and seed its gradient with 1.

    Seeding ``grad`` with 1 is the standard starting point for reverse-mode
    autodiff on a scalar loss.
    """
    # Using SNode writers would result in a forced sync, therefore we wrap these
    # writes into a kernel.
    l[None] = 0
    l.grad[None] = 1
def clear_gradients(vars: template()):
    """Zero all gradient fields handed in as ``vars``.

    ``vars`` is a compile-time sequence of raw Expr handles; the index
    domain of the first one drives the struct-for, and the inner loop over
    the handles is unrolled statically. Assumes all fields share the first
    field's shape — TODO(review): confirm against callers.
    """
    for I in ti.grouped(ScalarField(Expr(vars[0]))):
        for s in ti.static(vars):
            ScalarField(Expr(s))[I] = 0
def fill_vbo(vbo: template(), value: template(), offset: template(), num_components: template()):
    """Fill ``num_components`` slots of each VBO entry, starting at ``offset``,
    with a constant ``value``."""
    for vert in vbo:
        # Component loop is a compile-time constant range: unrolled statically.
        for comp in ti.static(range(num_components)):
            vbo[vert][offset + comp] = value
def copy_to_vbo(vbo: template(), src: template(), offset: template(), num_components: template()):
    """Copy ``num_components`` components of each ``src`` element into the VBO,
    starting at ``offset`` within each vertex record."""
    for vert in src:
        # Statically unrolled over the per-vertex component count.
        for comp in ti.static(range(num_components)):
            vbo[vert][offset + comp] = src[vert][comp]
def vector_to_image(mat: template(), arr: ext_arr()):
    """Copy a vector field into an f32 image array.

    For fields with fewer than 3 channels, the blue channel is zero-filled
    so the output is always a valid RGB image.
    """
    for cell in ti.grouped(mat):
        for chan in ti.static(range(mat.n)):
            arr[cell, chan] = ti.cast(mat[cell][chan], ti.f32)
            if ti.static(mat.n <= 2):
                arr[cell, 2] = 0
def tensor_to_tensor(tensor: template(), other: template()):
    """Element-wise copy of field ``other`` into field ``tensor``."""
    for cell in ti.grouped(tensor):
        tensor[cell] = other[cell]
def fill_matrix(mat: template(), vals: template()):
    """Write the compile-time matrix ``vals`` into every cell of ``mat``."""
    for cell in ti.grouped(mat):
        # Element dimensions are static: both loops unroll at compile time.
        for row in ti.static(range(mat.n)):
            for col in ti.static(range(mat.m)):
                mat[cell][row, col] = vals[row][col]
def snode_deactivate(b: template()):
    """Deactivate every active cell of the given SNode."""
    for cell in ti.grouped(b):
        ti.deactivate(b, cell)
def snode_deactivate_dynamic(b: template()):
    """Deactivate a dynamic SNode by iterating its parent's index domain."""
    # Dynamic nodes are cleared per parent cell, not per element.
    for cell in ti.grouped(b.parent()):
        ti.deactivate(b, cell)
def from_torch_template(expr: template(), torch_tensor: ext_arr()):
    """Copy a torch tensor into a field, element by element."""
    for pos in expr:
        expr[pos] = torch_tensor[pos]
def fill_ndarray_matrix(ndarray: any_arr(), val: template()):
    """Fill every matrix element of a matrix ndarray with ``val``."""
    for cell in ti.grouped(ndarray):
        ndarray[cell].fill(val)