Code example #1
File: adams.py  Project: tongtongliuliu/tfdiffeq
    def __init__(self,
                 func,
                 y0,
                 rtol,
                 atol,
                 implicit=True,
                 first_step=None,
                 max_order=_MAX_ORDER,
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 **unused_kwargs):
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.implicit = implicit
        self.first_step = first_step
        self.max_order = int(max(_MIN_ORDER, min(max_order, _MAX_ORDER)))
        self.safety = _convert_to_tensor(safety,
                                         dtype=tf.float64,
                                         device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor,
                                          dtype=tf.float64,
                                          device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor,
                                          dtype=tf.float64,
                                          device=y0[0].device)
Code example #2
File: bosh3.py  Project: tongtongliuliu/tfdiffeq
    def __init__(self,
                 func,
                 y0,
                 rtol,
                 atol,
                 first_step=None,
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 max_num_steps=2**31 - 1,
                 **unused_kwargs):
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.func = func
        self.y0 = y0
        self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
        self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
        self.first_step = first_step
        self.safety = _convert_to_tensor(safety,
                                         dtype=tf.float64,
                                         device=y0[0].device)
        self.ifactor = _convert_to_tensor(ifactor,
                                          dtype=tf.float64,
                                          device=y0[0].device)
        self.dfactor = _convert_to_tensor(dfactor,
                                          dtype=tf.float64,
                                          device=y0[0].device)
        self.max_num_steps = _convert_to_tensor(max_num_steps,
                                                dtype=tf.int32,
                                                device=y0[0].device)
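
In both constructors, `rtol` and `atol` are broadcast so that every tensor in the state tuple `y0` gets its own tolerance. A minimal standalone sketch of that idiom (the helper name below is a stand-in, not the library's `_is_iterable`):

import collections.abc

def broadcast_tolerance(tol, y0):
    # A scalar tolerance is repeated once per state tensor; an iterable is used as-is.
    if isinstance(tol, collections.abc.Iterable):
        return list(tol)
    return [tol] * len(y0)

# e.g. broadcast_tolerance(1e-6, (y_a, y_b)) -> [1e-06, 1e-06]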
Code example #3
def _interp_evaluate(coefficients, t0, t1, t):
    """Evaluate polynomial interpolation at the given time point.

    Args:
        coefficients: list of Tensor coefficients as created by `interp_fit`.
        t0: scalar float64 Tensor giving the start of the interval.
        t1: scalar float64 Tensor giving the end of the interval.
        t: scalar float64 Tensor giving the desired interpolation point.

    Returns:
        Polynomial interpolation of the coefficients at time `t`.
    """

    dtype = coefficients[0][0].dtype
    device = coefficients[0][0].device

    t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
    t1 = _convert_to_tensor(t1, dtype=dtype, device=device)
    t = _convert_to_tensor(t, dtype=dtype, device=device)

    assert (t0 <= t) & (t <= t1), \
        'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
    x = tf.cast(((t - t0) / (t1 - t0)), dtype)
    x = move_to_device(x, device)

    xs = [move_to_device(tf.convert_to_tensor(1, dtype=dtype), device), x]
    for _ in range(2, len(coefficients)):
        xs.append(xs[-1] * x)

    return tuple(
        _dot_product(coefficients_, reversed(xs))
        for coefficients_ in zip(*coefficients))
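
`_interp_evaluate` maps `t` to the normalized position `x = (t - t0) / (t1 - t0)`, builds the powers `[1, x, x**2, ...]`, and dot-products them against the reversed coefficient list, so `coefficients[0]` holds the highest-order term. A simplified sketch of that evaluation for a single tensor (a hypothetical `interp_evaluate_sketch`, not the library function, which works on per-component tuples):

import tensorflow as tf

def interp_evaluate_sketch(coefficients, t0, t1, t):
    # coefficients[0] is the highest-order term, coefficients[-1] the constant term.
    x = (t - t0) / (t1 - t0)              # normalize t into [0, 1]
    xs = [tf.ones_like(x), x]
    for _ in range(2, len(coefficients)):
        xs.append(xs[-1] * x)             # xs = [1, x, x**2, ...]
    return sum(c * p for c, p in zip(coefficients, reversed(xs)))

# 2*x**2 + 0*x + 1 on [0, 1], evaluated at t = 0.5 -> 1.5
coeffs = [tf.constant(2.0), tf.constant(0.0), tf.constant(1.0)]
print(interp_evaluate_sketch(coeffs, tf.constant(0.0), tf.constant(1.0), tf.constant(0.5)).numpy())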
Code example #4
    def before_integrate(self, t):
        if self.first_step is None:
            first_step = _convert_to_tensor(_select_initial_step(self.func, t[0], self.y0, 4, self.rtol, self.atol),
                                            device=t.device)
        else:
            first_step = _convert_to_tensor(0.01, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(
            self.y0,
            cast_double(self.func(t[0], self.y0)), t[0], t[0], first_step,
            tuple(map(lambda x: [x] * 7, self.y0))
        )
Code example #5
File: tsit5.py  Project: yohanesnuwara/tfdiffeq
    def before_integrate(self, t):
        if self.first_step is None:
            first_step = _convert_to_tensor(_select_initial_step(self.func, t[0], self.y0, 4, self.rtol, self.atol),
                                            device=t.device)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)

        self.rk_state = _RungeKuttaState(
            self.y0,
            self.func(t[0], self.y0), t[0], t[0], first_step,
            [self.y0] * 7
        )
Code example #6
    def before_integrate(self, t):
        f0 = self.func(tf.cast(t[0], self.y0[0].dtype), self.y0)
        if self.first_step is None:
            first_step = _select_initial_step(self.func, t[0], self.y0, 1, self.rtol[0], self.atol[0], f0=f0)
            first_step = move_to_device(tf.cast(first_step, t.dtype), t.device)
        else:
            first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
        self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
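
All three `before_integrate` variants pack the same six items into `_RungeKuttaState`: the current state, its derivative, the previous and current times, the proposed step size, and the interpolation coefficients. A namedtuple with that assumed layout (inferred from the call sites above, not copied from tfdiffeq):

import collections

# Field layout inferred from the calls above; an assumption, not the library's definition.
_RungeKuttaState = collections.namedtuple(
    '_RungeKuttaState', 'y1, f1, t0, t1, dt, interp_coeff')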
Code example #7
File: adams.py  Project: yohanesnuwara/tfdiffeq
    def advance(self, final_t):
        final_t = _convert_to_tensor(final_t, device=self.vcabm_state.prev_t[0].device)
        while final_t > self.vcabm_state.prev_t[0]:
            # print("VCABM State T = ", final_t.numpy(), self.vcabm_state.y_n)
            self.vcabm_state = self._adaptive_adams_step(self.vcabm_state, final_t)

        assert tf.equal(final_t, self.vcabm_state.prev_t[0])
        return self.vcabm_state.y_n
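
The loop keeps taking adaptive steps while `final_t` lies ahead of the last accepted time; the trailing assert only passes if `_adaptive_adams_step` caps its final step so that it lands exactly on `final_t` rather than stepping past it.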
Code example #8
def _runge_kutta_step(func, y0, f0, t0, dt, tableau):
    """Take an arbitrary Runge-Kutta step and estimate error.

    Args:
        func: Function to evaluate like `func(t, y)` to compute the time derivative
            of `y`.
        y0: Tensor initial value for the state.
        f0: Tensor initial value for the derivative, computed from `func(t0, y0)`.
        t0: float64 scalar Tensor giving the initial time.
        dt: float64 scalar Tensor giving the size of the desired time step.
        tableau: optional _ButcherTableau describing how to take the Runge-Kutta
            step.

    Returns:
        Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
        the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
        estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
        calculating these terms.
    """
    y0 = cast_double(y0)
    f0 = cast_double(f0)

    dtype = y0[0].dtype
    device = y0[0].device

    t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
    dt = _convert_to_tensor(dt, dtype=dtype, device=device)

    k = tuple(map(lambda x: [x], f0))
    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
        ti = t0 + alpha_i * dt
        yi = tuple(y0_ + _scaled_dot_product(dt, cast_double(beta_i), k_)
                   for y0_, k_ in zip(y0, k))
        tuple(k_.append(cast_double(f_)) for k_, f_ in zip(k, func(ti, yi)))

    if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
        # This property (true for Dormand-Prince) lets us save a few FLOPs.
        yi = tuple(y0_ + _scaled_dot_product(dt, tableau.c_sol, k_)
                   for y0_, k_ in zip(y0, k))

    y1 = yi
    f1 = tuple(k_[-1] for k_ in k)
    y1_error = tuple(_scaled_dot_product(dt, tableau.c_error, k_) for k_ in k)
    return (y1, f1, y1_error, k)
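
The stage loop above is the generic explicit Runge-Kutta recipe: stage `i` evaluates `func` at `t0 + alpha_i * dt` on a state built from the earlier stages weighted by `beta_i`, then `c_sol` combines all stages into `y1` and `c_error` into the embedded error estimate. A single-tensor sketch of that structure (a hypothetical `rk_step_sketch`, demonstrated with Heun's method rather than one of the tableaus used above):

import tensorflow as tf

def rk_step_sketch(func, y0, f0, t0, dt, alpha, beta, c_sol, c_error):
    """One explicit Runge-Kutta step for a single tensor state (illustrative only)."""
    k = [f0]                                                  # stage derivatives, k[0] = f(t0, y0)
    for alpha_i, beta_i in zip(alpha, beta):
        ti = t0 + alpha_i * dt
        yi = y0 + dt * sum(b * k_ for b, k_ in zip(beta_i, k))
        k.append(func(ti, yi))
    y1 = y0 + dt * sum(c * k_ for c, k_ in zip(c_sol, k))     # solution combination
    y1_error = dt * sum(c * k_ for c, k_ in zip(c_error, k))  # embedded error estimate
    return y1, y1_error, k

# Heun's method on y' = -y, y(0) = 1, one step of dt = 0.1.
f = lambda t, y: -y
y0 = tf.constant(1.0)
y1, err, _ = rk_step_sketch(
    f, y0, f(0.0, y0), tf.constant(0.0), tf.constant(0.1),
    alpha=[1.0], beta=[[1.0]],
    c_sol=[0.5, 0.5],      # trapezoidal combination of the two stages
    c_error=[-0.5, 0.5])   # difference to the forward-Euler update as the error estimate
print(y1.numpy(), err.numpy())   # ~0.905, ~0.005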
Code example #9
File: tsit5.py  Project: yohanesnuwara/tfdiffeq
def _optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
    """Calculate the optimal size for the next Runge-Kutta step."""
    if mean_error_ratio == 0:
        return last_step * ifactor
    if mean_error_ratio < 1:
        dfactor = _convert_to_tensor(1., dtype=tf.float64, device=mean_error_ratio.device)
    error_ratio = tf.cast(mean_error_ratio, last_step.dtype)
    exponent = tf.convert_to_tensor(1. / order, dtype=last_step.dtype)
    factor = tf.maximum(1. / ifactor, tf.minimum((error_ratio ** exponent) / safety, 1. / dfactor))
    return last_step / factor
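
Stripped of the tensor conversions, the rule above is the standard proportional step-size controller: the next step is `last_step / factor`, where `factor` is `error_ratio ** (1 / order) / safety` clamped to `[1/ifactor, 1/dfactor]`, so an accurate step can grow by at most `ifactor` and an inaccurate one shrinks by at most `dfactor`. A plain-Python restatement for scalars:

def optimal_step_sketch(last_step, error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
    # error_ratio < 1: the step met its tolerance; > 1: it exceeded tolerance.
    if error_ratio == 0:
        return last_step * ifactor                 # no observed error: grow as much as allowed
    if error_ratio < 1:
        dfactor = 1.0                              # never shrink a step that met tolerance
    factor = max(1.0 / ifactor,
                 min(error_ratio ** (1.0 / order) / safety, 1.0 / dfactor))
    return last_step / factor

print(optimal_step_sketch(0.1, 1e-6))   # accurate step -> grows to 1.0 (the ifactor cap)
print(optimal_step_sketch(0.1, 50.0))   # inaccurate step -> shrinks to about 0.041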