Example #1
def nn1(t: ti.i32):
    for i in range(n_hidden):
        actuation = 0.0
        for j in ti.static(range(n_sin_waves)):
            actuation += weights1[i, j] * ti.sin(spring_omega * t * dt +
                                                 2 * math.pi / n_sin_waves * j)
        for j in ti.static(range(n_objects)):
            offset = x[t, j] - x[t, head_id]
            # use a smaller weight since there are too many of them
            actuation += weights1[i, j * 6 + n_sin_waves] * offset[0] * 0.05
            actuation += weights1[i, j * 6 + n_sin_waves + 1] * offset[1] * 0.05
            actuation += weights1[i, j * 6 + n_sin_waves + 2] * v[t, j][0] * 0.05
            actuation += weights1[i, j * 6 + n_sin_waves + 3] * v[t, j][1] * 0.05
            actuation += weights1[i, j * 6 + n_sin_waves + 4] * rotation[t, j] * 0.05
            actuation += weights1[i, j * 6 + n_sin_waves + 5] * omega[t, j] * 0.05

        actuation += weights1[i, n_objects * 6 + n_sin_waves] * goal[None][0]
        actuation += weights1[i, n_objects * 6 + n_sin_waves + 1] * goal[None][1]
        actuation += bias1[i]
        actuation = ti.tanh(actuation)
        hidden[t, i] = actuation
Example #2
def nn2(t: ti.i32):
    for i in range(n_gravitation):
        act = 0.0
        for j in ti.static(range(n_hidden)):
            act += hidden[t, j] * weight2[j, i]
        act += bias2[i]
        gravitation[t, i] = ti.tanh(act)
Example #3
def nn1(t: ti.i32):
    for i in range(n_hidden):
        actuation = 0.0
        for j in ti.static(range(n_sin_waves)):
            actuation += weights1[i, j] * ti.sin(spring_omega * t * dt +
                                                 2 * math.pi / n_sin_waves * j)
        for j in ti.static(range(n_objects)):
            offset = x[t, j] - center[t]
            # use a smaller weight since there are too many of them
            actuation += weights1[i, j * 4 + n_sin_waves] * offset[0] * 0.05
            actuation += weights1[i, j * 4 + n_sin_waves + 1] * offset[1] * 0.05
            actuation += weights1[i, j * 4 + n_sin_waves + 2] * v[t, j][0] * 0.05
            actuation += weights1[i, j * 4 + n_sin_waves + 3] * v[t, j][1] * 0.05
        if ti.static(duplicate_v > 0):
            for j in ti.static(range(duplicate_v)):
                actuation += weights1[i, n_objects * 4 + n_sin_waves + j * 2] * target_v[t][0]
                actuation += weights1[i, n_objects * 4 + n_sin_waves + j * 2 + 1] * target_v[t][1]
        if ti.static(duplicate_h > 0):
            for j in ti.static(range(duplicate_h)):
                actuation += weights1[i, n_objects * 4 + n_sin_waves + duplicate_v * 2 + j] * target_h[None]
        actuation += bias1[i]
        actuation = ti.tanh(actuation)
        hidden[t, i] = actuation
Example #4
def compute_actuation(t: ti.i32):
    for i in range(n_actuators):
        act = 0.0
        for j in ti.static(range(n_sin_waves)):
            act += weights[i, j] * ti.sin(actuation_omega * t * dt +
                                          2 * math.pi / n_sin_waves * j)
        act += bias[i]
        actuation[t, i] = ti.tanh(act)
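
The snippets on this page are fragments that depend on fields and constants defined elsewhere in their repositories. For reference, the following is a minimal self-contained sketch in the spirit of Example #4 above; the field names, shapes, and constants are placeholder assumptions, not taken from any of the listed projects.

import math
import taichi as ti

ti.init(arch=ti.cpu)

# placeholder sizes and constants (assumptions for illustration only)
n_actuators = 4
n_sin_waves = 8
steps = 128
dt = 0.01
actuation_omega = 20.0

weights = ti.field(ti.f32, shape=(n_actuators, n_sin_waves), needs_grad=True)
bias = ti.field(ti.f32, shape=n_actuators, needs_grad=True)
actuation = ti.field(ti.f32, shape=(steps, n_actuators))

@ti.kernel
def compute_actuation(t: ti.i32):
    for i in range(n_actuators):
        act = 0.0
        for j in ti.static(range(n_sin_waves)):
            act += weights[i, j] * ti.sin(actuation_omega * t * dt +
                                          2 * math.pi / n_sin_waves * j)
        act += bias[i]
        actuation[t, i] = ti.tanh(act)  # squash to (-1, 1)

compute_actuation(0)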
Example #5
def nn2(t: ti.i32):
    for i in range(n_springs):
        actuation = 0.0
        for j in ti.static(range(n_hidden)):
            actuation += weights2[i, j] * hidden[t, j]
        actuation += bias2[i]
        actuation = ti.tanh(actuation)
        act[t, i] = actuation
Example #6
def bias(t: ti.i32):
    for i in range(d1):
        act = h1_prev[t, i] + b[i]
        if ti.static(activation == 'relu'):
            act = ti.max(act, 0.)
        if ti.static(activation == 'tanh'):
            act = ti.tanh(act)
        h1[t, i] = act
Example #7
def thinc(wl, wc, wr, beta):
    w0 = wc
    w1 = wc
    if (wr - wc) * (wc - wl) > 0.0:
        # use thinc reconstruction
        eps = 1.0e-15
        wmin = min(wr, wl)
        wmax = max(wr, wl)
        wdelta = wmax - wmin
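        # NOTE: sign() and cosh() are not defined in this snippet; they are
        # presumably helper functions or math imports from the original file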
        theta = sign(wr - wl)
        C = (wc - wmin + eps) / (wdelta + eps)
        B = ti.exp(theta * beta * (2 * C - 1))
        A = (B / cosh(beta) - 1) / ti.tanh(beta)

        # reconstructed value on right side of left face
        w0 = wmin + wdelta / 2.0 * (1.0 + theta * A)

        # reconstructed value on left side of right face
        w1 = wmin + wdelta / 2.0 * (1.0 + theta * (ti.tanh(beta) + A) /
                                    (1.0 + A * ti.tanh(beta)))

    return w0, w1
Example #8
def nn1(t: ti.i32):
    for i in range(n_hidden):
        act = 0.0
        act += (x[t][0] - 0.5) * weight1[0, i]
        act += (x[t][1] - 0.5) * weight1[1, i]
        act += v[t][0] * weight1[2, i]
        act += v[t][1] * weight1[3, i]
        act += (goal[t][0] - 0.5) * weight1[4, i]
        act += (goal[t][1] - 0.5) * weight1[5, i]
        act += (goal_v[t][0] - 0.5) * weight1[6, i]
        act += (goal_v[t][1] - 0.5) * weight1[7, i]
        act += bias1[i]
        hidden[t, i] = ti.tanh(act)
Example #9
def _forward(self, t: ti.i32, nn_input: ti.template()):
    # dense layer: accumulate weights1 @ input into hidden, then add bias1
    # and (optionally) apply a tanh activation to produce the output
    for model_id, k, i, j in ti.ndrange(self.n_models, self.batch_size,
                                        self.n_hidden, self.n_input):
        self.hidden[model_id, t, k, i] += \
            self.weights1[model_id, i, j] * nn_input[model_id, t, k, j]
    if ti.static(self.activation):
        for model_id, k, i in ti.ndrange(self.n_models, self.batch_size,
                                         self.n_hidden):
            self.output[model_id, t, k, i] = ti.tanh(
                self.hidden[model_id, t, k, i] + self.bias1[model_id, i])
    else:
        for model_id, k, i in ti.ndrange(self.n_models, self.batch_size,
                                         self.n_hidden):
            self.output[model_id, t, k, i] = (self.hidden[model_id, t, k, i] +
                                              self.bias1[model_id, i])
Example #10
def func():
    xi[0] = -yi[None]
    xi[1] = ~yi[None]
    xi[2] = ti.logical_not(yi[None])
    xi[3] = ti.abs(yi[None])
    xf[0] = -yf[None]
    xf[1] = ti.abs(yf[None])
    xf[2] = ti.sqrt(yf[None])
    xf[3] = ti.sin(yf[None])
    xf[4] = ti.cos(yf[None])
    xf[5] = ti.tan(yf[None])
    xf[6] = ti.asin(yf[None])
    xf[7] = ti.acos(yf[None])
    xf[8] = ti.tanh(yf[None])
    xf[9] = ti.floor(yf[None])
    xf[10] = ti.ceil(yf[None])
    xf[11] = ti.exp(yf[None])
    xf[12] = ti.log(yf[None])
Example #11
def test_trigonometric():
  grad_test(lambda x: ti.tanh(x), lambda x: np.tanh(x))
  grad_test(lambda x: ti.sin(x), lambda x: np.sin(x))
  grad_test(lambda x: ti.cos(x), lambda x: np.cos(x))
  grad_test(lambda x: ti.acos(x), lambda x: np.arccos(x))
  grad_test(lambda x: ti.asin(x), lambda x: np.arcsin(x))
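
grad_test in Examples #11 and #13 is a helper from the Taichi test suite and is not shown here. As a rough self-contained illustration of the same idea, the sketch below checks the ti.tanh gradient against the analytic derivative using ti.ad.Tape (available in recent Taichi versions); the field names and the sample point are made up for this example.

import numpy as np
import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(ti.f32, shape=(), needs_grad=True)
y = ti.field(ti.f32, shape=(), needs_grad=True)

@ti.kernel
def eval_tanh():
    y[None] = ti.tanh(x[None])

x[None] = 0.3
with ti.ad.Tape(loss=y):
    eval_tanh()

# d/dx tanh(x) = 1 - tanh(x)^2
assert abs(x.grad[None] - (1.0 - np.tanh(0.3) ** 2)) < 1e-3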
Example #12
def nonlinear1():
    for i in range(n_hidden):
        output1_nonlinear[i] = ti.tanh(output1[i])
Example #13
    lambda x: x * x,
    lambda x: x**2,
    lambda x: x * x * x,
    lambda x: x * x * x * x,
    lambda x: 0.4 * x * x - 3,
    lambda x: (x - 3) * (x - 1),
    lambda x: (x - 3) * (x - 1) + x * x,
])
@if_has_autograd
@test_utils.test()
def test_poly(tifunc):
    grad_test(tifunc)


@pytest.mark.parametrize('tifunc,npfunc', [
    (lambda x: ti.tanh(x), lambda x: np.tanh(x)),
    (lambda x: ti.sin(x), lambda x: np.sin(x)),
    (lambda x: ti.cos(x), lambda x: np.cos(x)),
    (lambda x: ti.acos(x), lambda x: np.arccos(x)),
    (lambda x: ti.asin(x), lambda x: np.arcsin(x)),
])
@if_has_autograd
@test_utils.test(exclude=[ti.vulkan])
def test_trigonometric(tifunc, npfunc):
    grad_test(tifunc, npfunc)


@pytest.mark.parametrize('tifunc', [
    lambda x: 1 / x,
    lambda x: (x + 1) / (x - 1),
    lambda x: (x + 1) * (x + 2) / ((x - 1) * (x + 3)),