def test_kernel_template_gradient():
    """Check that ti.template() kernel arguments interoperate with autodiff (ti.Tape)."""
    x = ti.global_var(ti.f32)
    y = ti.global_var(ti.f32)
    z = ti.global_var(ti.f32)
    loss = ti.global_var(ti.f32)

    @ti.layout
    def tensors():
        ti.root.dense(ti.i, 16).place(x, y, z)
        ti.root.place(loss)
        ti.root.lazy_grad()  # allocate .grad tensors for everything placed above

    @ti.kernel
    def double(a: ti.template(), b: ti.template()):
        # b = 2*a + 1 element-wise; template args let one kernel serve both
        # the x -> y and the y -> z stages below.
        for i in range(16):
            b[i] = a[i] * 2 + 1

    @ti.kernel
    def compute_loss():
        # loss = sum(z); atomic because the loop body may run in parallel
        for i in range(16):
            ti.atomic_add(loss, z[i])

    for i in range(16):
        x[i] = i

    with ti.Tape(loss):  # records the forward pass, evaluates gradients on exit
        double(x, y)
        double(y, z)
        compute_loss()

    for i in range(16):
        # forward: z = 2*(2*i + 1) + 1 = 4*i + 3
        assert z[i] == i * 4 + 3
        # d loss/dx = d z/dx = 2 * 2 = 4 for every element
        assert x.grad[i] == 4
# Simulation constants and Taichi field declarations (module level, shared by
# the kernels defined elsewhere in this file).
p_vol = 1
E = 10  # TODO: update
mu = E
la = E
max_steps = 512
steps = 512
gravity = 10
target = [0.8, 0.2, 0.2]
use_apic = False


# PEP 8 (E731): use def instead of assigning lambdas to names — same call
# interface, but the helpers now have proper names in tracebacks and can
# carry docstrings. `real` and `dim` are defined elsewhere in this file and
# are looked up at call time, exactly as the lambdas did.
def scalar():
    """Declare a scalar Taichi field with the module's real dtype."""
    return ti.var(dt=real)


def vec():
    """Declare a dim-component vector Taichi field."""
    return ti.Vector(dim, dt=real)


def mat():
    """Declare a dim x dim matrix Taichi field."""
    return ti.Matrix(dim, dim, dt=real)


actuator_id = ti.global_var(ti.i32)
particle_type = ti.global_var(ti.i32)
x, v = vec(), vec()
grid_v_in, grid_m_in = vec(), scalar()
grid_v_out = vec()
C, F = mat(), mat()
screen = ti.Vector(3, dt=real)
loss = scalar()
n_sin_waves = 4
weights = scalar()
bias = scalar()
x_avg = vec()
n_objects = 0 # target_ball = 0 elasticity = 0.0 ground_height = 0.1 gravity = -9.8 friction = 1.0 penalty = 1e4 damping = 10 gradient_clip = 30 spring_omega = 30 default_actuation = 0.05 n_springs = 0 spring_anchor_a = ti.global_var(ti.i32) spring_anchor_b = ti.global_var(ti.i32) # spring_length = -1 means it is a joint spring_length = scalar() spring_offset_a = vec() spring_offset_b = vec() spring_phase = scalar() spring_actuation = scalar() spring_stiffness = scalar() n_sin_waves = 10 n_hidden = 32 weights1 = scalar() bias1 = scalar() hidden = scalar()
import taichi as ti

x = ti.global_var(ti.f32)
y = ti.global_var(ti.f32)
z = ti.global_var(ti.f32)
loss = ti.global_var(ti.f32)


@ti.layout
def tensors():
    ti.root.dense(ti.i, 16).place(x, y, z)
    ti.root.place(loss)
    ti.root.lazy_grad()  # allocate .grad tensors for everything placed above


def double(a, b):
    """Build and eagerly compile a kernel that computes b = 2*a + 1 element-wise.

    The tensors a and b are captured by closure, so each call produces a
    kernel bound to a specific input/output pair.
    """

    @ti.kernel
    def kernel():
        for i in range(16):
            b[i] = a[i] * 2 + 1

    # Make sure you materialize the kernels immediately (by default they are initialized on first invocation)
    kernel.materialize()
    kernel.grad.materialize()  # If you need the gradients
    return kernel


@ti.kernel
def compute_loss():
    # loss = sum(z); atomic because the loop body may run in parallel
    for i in range(16):
        ti.atomic_add(loss, z[i])
import taichi as ti

x = ti.global_var(ti.i32)  # n rows, each an append-able (dynamic) list of ints
l = ti.global_var(ti.i32)  # per-row element count, filled via ti.length
n = 16

# ti.runtime.print_preprocessed = True


@ti.layout
def lists():
    # Row i of x is a dynamic list along axis j that can hold up to n entries.
    ti.root.dense(ti.i, n).dynamic(ti.j, n).place(x)
    ti.root.dense(ti.i, n).place(l)


@ti.kernel
def make_lists():
    for i in range(n):
        for j in range(i):
            ti.append(x.parent(), i, j * j)  # push j*j onto row i
        l[i] = ti.length(x.parent(), i)  # record how many entries row i holds


make_lists()

for i in range(n):
    assert l[i] == i  # row i received exactly i appends
    for j in range(n):
        # slots past the appended length read back as 0
        assert x[i, j] == (j * j if j < i else 0)
import taichi as ti
import matplotlib.pyplot as plt
import math
import sys

x = ti.global_var(dt=ti.f32)        # position history, one entry per timestep
v = ti.global_var(dt=ti.f32)        # velocity history, one entry per timestep
a = ti.global_var(dt=ti.f32)        # scalar acceleration parameter
loss = ti.global_var(dt=ti.f32)
damping = ti.global_var(dt=ti.f32)  # per-step velocity damping factor

max_timesteps = 1024 * 1024
dt = 0.001


@ti.layout
def place():
    ti.root.dense(ti.i, max_timesteps).place(x, v)
    ti.root.place(a, damping, loss)
    ti.root.lazy_grad()  # allocate .grad tensors for everything placed above


@ti.kernel
def advance(t: ti.i32):
    # Semi-implicit Euler step: update velocity first (damped previous
    # velocity plus acceleration), then integrate position with the new v.
    v[t] = damping[None] * v[t - 1] + a[None]
    x[t] = x[t - 1] + dt * v[t]


@ti.kernel
def compute_loss(t: ti.i32):
    # NOTE(review): kernel body continues beyond this chunk of the file.