Example #1
def identity(x):
    """The identity linear transform.

    Args:
        x: PhasePoint

    Returns: PhasePoint
    """
    return nest_map(tf.identity, x)
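`nest_map` itself is not shown on this page. A minimal sketch of what it presumably does, assuming it behaves like `tf.nest.map_structure` applied across parallel nested structures (the implementation below is an assumption, not the library's own code):

import tensorflow as tf

def nest_map(fn, *structures):
    # Assumed behaviour: apply `fn` elementwise across parallel nested
    # structures of tensors (lists/tuples/dicts), keeping the structure.
    return tf.nest.map_structure(fn, *structures)

# e.g. nest_map(tf.identity, {'q': tf.constant(1.0), 'p': tf.constant(2.0)})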
Example #2
    def step_forward(t, dt, x):
        # One step of the classical fourth-order Runge-Kutta (RK4)
        # method: four slope evaluations k1..k4, combined with
        # weights 1, 2, 2, 1.
        k1 = f(t, x)

        new_t = t + dt / 2
        new_x = nest_map(lambda x, k1: x + k1 * dt / 2, x, k1)
        k2 = f(new_t, new_x)

        # k3 is evaluated at the same midpoint time as k2.
        new_x = nest_map(lambda x, k2: x + k2 * dt / 2, x, k2)
        k3 = f(new_t, new_x)

        new_t = t + dt
        new_x = nest_map(lambda x, k3: x + k3 * dt, x, k3)
        k4 = f(new_t, new_x)

        new_x = nest_map(
            lambda x, k1, k2, k3, k4: x + (k1 + 2 * k2 + 2 * k3 + k4) / 6 * dt,
            x, k1, k2, k3, k4)
        return new_x
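For reference, a hypothetical one-step check of this RK4 update on dx/dt = -x, written for a single tensor rather than a nested structure (the names `rk4_step`, `x0`, `x1` are illustrative, not from the source):

import tensorflow as tf

def rk4_step(f, t, dt, x):
    # The same RK4 update as step_forward above, for a plain tensor state.
    k1 = f(t, x)
    k2 = f(t + dt / 2, x + k1 * dt / 2)
    k3 = f(t + dt / 2, x + k2 * dt / 2)
    k4 = f(t + dt, x + k3 * dt)
    return x + (k1 + 2 * k2 + 2 * k3 + k4) / 6 * dt

x0 = tf.constant(1.0)
x1 = rk4_step(lambda t, x: -x, 0.0, 0.1, x0)
# exact solution: exp(-0.1) ≈ 0.9048374; the RK4 local error is O(dt**5)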
Example #3
    def aug_dynamics(time, aug_phase_point):
        # Dynamics of the augmented state [x, a, dL/dtheta] used by the
        # adjoint sensitivity method; the adjoint a obeys da/dt = -a^T df/dx.
        state, adjoint, *_ = aug_phase_point
        neg_adjoint = nest_map(lambda x: -1 * x, adjoint)

        with tf.GradientTape() as g:
            g.watch(state)
            output = network(time, state)
        # According to
        # https://www.tensorflow.org/api_docs/python/tf/custom_gradient
        # `tf.gradients` and `g.gradient` return the vector-Jacobian
        # products directly when the third argument (`output_gradients`)
        # is supplied. In fact, TF implements VJPs internally and
        # computes gradients via them.
        vjps = g.gradient(output, [state] + variables,
                          output_gradients=neg_adjoint,
                          unconnected_gradients='zero')

        new_aug_phase_point = [output] + vjps
        return new_aug_phase_point
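A minimal, self-contained demonstration of the VJP behaviour described in the comment above (the tensors `x`, `v`, `y` are chosen purely for illustration):

import tensorflow as tf

x = tf.constant([1.0, 2.0])
v = tf.constant([10.0, 100.0])
with tf.GradientTape() as g:
    g.watch(x)
    y = x ** 2                      # Jacobian dy/dx = diag(2 * x)
vjp = g.gradient(y, x, output_gradients=v)
# vjp == v * 2 * x == [20., 400.], i.e. the vector-Jacobian product
# in a single backward pass, without materializing the Jacobian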
Example #4
 def step_forward(t, dt, x):
     # One step of the explicit (forward) Euler method:
     # x <- x + f(t, x) * dt.
     k1 = f(t, x)
     new_x = nest_map(lambda x, k1: x + k1 * dt, x, k1)
     return new_x
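A hypothetical one-step check on dx/dt = -x, written for a plain tensor (illustrative only):

import tensorflow as tf

x0 = tf.constant(1.0)
dt = 0.1
x1 = x0 + (-x0) * dt    # Euler: 0.9 vs. exact exp(-0.1) ≈ 0.9048
# the local error is O(dt**2), versus O(dt**5) for the RK4 step above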
Example #5
 def cast(x, dtype):
     # Cast every tensor leaf in the nested structure `x` to `dtype`.
     return nest_map(lambda x: tf.cast(x, dtype), x)
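Hypothetical usage, assuming `cast` above and the `nest_map` sketched under Example #1 are in scope:

import tensorflow as tf

x = [tf.constant(1.0), {'p': tf.constant(2.0)}]
x64 = cast(x, tf.float64)   # every leaf tensor is cast to float64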
Example #6
 def rescale_fn(x):
     # `_rescale_fn` is defined elsewhere in the source module.
     return nest_map(_rescale_fn, x)