    def step_func(self, func, t, dt, y):
        """Take a single Adams-Bashforth(-Moulton) step of size `dt` from `t`."""
        self._update_history(t, func(t, y))
        order = min(len(self.prev_f), self.max_order - 1)
        if order < _MIN_ORDER - 1:
            # Not enough history for a multistep formula yet: bootstrap with an
            # RK4 step, reusing the just-stored derivative as k1.
            dy = rk_common.rk4_alt_step_func(func, t, dt, y, k1=self.prev_f[0])
            return dy
        else:
            # Adams-Bashforth predictor.
            bashforth_coeffs = _BASHFORTH_COEFFICIENTS[order]
            ab_div = _DIVISOR[order]
            dy = tuple(dt * _scaled_dot_product(1 / ab_div, bashforth_coeffs, f_) for f_ in zip(*self.prev_f))

            # Adams-Moulton corrector, solved by fixed-point iteration.
            if self.implicit:
                moulton_coeffs = _MOULTON_COEFFICIENTS[order + 1]
                am_div = _DIVISOR[order + 1]
                # `delta` collects the corrector terms that depend only on stored
                # history; the leading term uses the new derivative and is added below.
                delta = tuple(dt * _scaled_dot_product(1 / am_div, moulton_coeffs[1:], f_) for f_ in zip(*self.prev_f))
                converged = False
                for _ in range(self.max_iters):
                    dy_old = dy
                    f = func(t + dt, tuple(y_ + dy_ for y_, dy_ in zip(y, dy)))
                    dy = tuple(dt * (moulton_coeffs[0] / am_div) * f_ + delta_ for f_, delta_ in zip(f, delta))
                    converged = _has_converged(dy_old, dy, self.rtol, self.atol)
                    if converged:
                        break
                if not converged:
                    print('Warning: Functional iteration did not converge. Solution may be incorrect.', file=sys.stderr)
                    self.prev_f.pop()
                self._update_history(t, f)
            return dy
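
A minimal self-contained sketch of the same predictor-corrector idea, assuming hard-coded two-step Adams-Bashforth and trapezoidal Adams-Moulton coefficients in place of the module's _BASHFORTH_COEFFICIENTS/_MOULTON_COEFFICIENTS tables; abm2_step and the bootstrap below are illustrative, not part of the library:

import torch

def abm2_step(func, t, dt, y, f_prev):
    # Two-step Adams-Bashforth predictor: y_p = y + dt * (3/2 f_n - 1/2 f_{n-1}).
    f_n = func(t, y)
    y_pred = y + dt * (1.5 * f_n - 0.5 * f_prev)
    # Trapezoidal Adams-Moulton corrector, a single fixed-point sweep:
    # y_{n+1} = y + dt/2 * (f(t + dt, y_p) + f_n).
    y_corr = y + 0.5 * dt * (func(t + dt, y_pred) + f_n)
    return y_corr, f_n

def f(t, y):
    return -y

# Usage on dy/dt = -y; bootstrapping f_prev with the initial slope costs one
# order of accuracy on the first step only.
t, dt, y = torch.tensor(0.0), torch.tensor(0.1), torch.tensor(1.0)
f_prev = f(t, y)
for _ in range(10):
    y, f_prev = abm2_step(f, t, dt, y, f_prev)
    t = t + dt
print(y, torch.exp(torch.tensor(-1.0)))  # numeric vs. exact solution at t = 1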
Example #2
def _interp_eval_tsit5(t0, t1, k, eval_t):
    dt = t1 - t0
    y0 = tuple(k_[0] for k_ in k)
    interp_coeff = _interp_coeff_tsit5(t0, dt, eval_t)
    y_t = tuple(y0_ + _scaled_dot_product(dt, interp_coeff, k_)
                for y0_, k_ in zip(y0, k))
    return y_t
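
_interp_coeff_tsit5 (not shown) supplies the Tsit5-specific dense-output weight polynomials. The general idea can be sketched without them: the cubic Hermite interpolant below uses only the endpoint values and slopes of an accepted step. hermite_eval is an illustrative stand-in, not the library's interpolant:

import torch

def hermite_eval(t0, t1, y0, y1, f0, f1, t):
    # Cubic Hermite interpolation on [t0, t1] from endpoint values and slopes.
    h = t1 - t0
    s = (t - t0) / h  # normalized position in [0, 1]
    h00 = (1 + 2 * s) * (1 - s) ** 2  # weight for y0
    h10 = s * (1 - s) ** 2            # weight for h * f0
    h01 = s ** 2 * (3 - 2 * s)        # weight for y1
    h11 = s ** 2 * (s - 1)            # weight for h * f1
    return h00 * y0 + h * h10 * f0 + h01 * y1 + h * h11 * f1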
Example #3
def _interp_fit_dopri5(y0, y1, k, dt, tableau=_DORMAND_PRINCE_SHAMPINE_TABLEAU):
    """Fit an interpolating polynomial to the results of a Runge-Kutta step."""
    dt = dt.type_as(y0[0])
    # DPS_C_MID holds the Dormand-Prince weights for evaluating the solution at
    # the midpoint of the step; `tableau` itself is not referenced in this body.
    y_mid = tuple(y0_ + _scaled_dot_product(dt, DPS_C_MID, k_)
                  for y0_, k_ in zip(y0, k))
    f0 = tuple(k_[0] for k_ in k)
    f1 = tuple(k_[-1] for k_ in k)
    return _interp_fit(y0, y1, y_mid, f0, f1, dt)
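
_interp_fit (not shown) turns these five quantities (y0, y1, y_mid, f0, f1) into the coefficients of a quartic interpolant. One way to compute such a fit, shown here for a scalar state as an illustration rather than the library's closed-form version:

import torch

def fit_quartic(y0, y_mid, y1, f0, f1, dt):
    # Solve for c in p(s) = c[0] + c[1]*s + ... + c[4]*s**4 on s in [0, 1], with
    # p(0) = y0, p(1/2) = y_mid, p(1) = y1, p'(0) = dt*f0, p'(1) = dt*f1.
    A = torch.tensor([
        [1.0, 0.0, 0.00, 0.000, 0.0000],   # p(0)
        [1.0, 0.5, 0.25, 0.125, 0.0625],   # p(1/2)
        [1.0, 1.0, 1.00, 1.000, 1.0000],   # p(1)
        [0.0, 1.0, 0.00, 0.000, 0.0000],   # p'(0)
        [0.0, 1.0, 2.00, 3.000, 4.0000],   # p'(1)
    ])
    b = torch.stack([y0, y_mid, y1, dt * f0, dt * f1])
    return torch.linalg.solve(A, b)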
def _runge_kutta_step(func, y0, f0, t0, dt, tableau):
    """Take an arbitrary Runge-Kutta step and estimate error.

    Args:
        func: Function to evaluate like `func(t, y)` to compute the time derivative
            of `y`.
        y0: tuple of Tensors giving the initial value for the state.
        f0: tuple of Tensors giving the initial value for the derivative, computed
            from `func(t0, y0)`.
        t0: float64 scalar Tensor giving the initial time.
        dt: float64 scalar Tensor giving the size of the desired time step.
        tableau: _ButcherTableau describing how to take the Runge-Kutta step.

    Returns:
        Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
        the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
        estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
        calculating these terms.
    """
    dtype = y0[0].dtype
    device = y0[0].device

    t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
    dt = _convert_to_tensor(dt, dtype=dtype, device=device)

    k = tuple(map(lambda x: [x], f0))
    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
        ti = t0 + alpha_i * dt
        yi = tuple(y0_ + _scaled_dot_product(dt, beta_i, k_) for y0_, k_ in zip(y0, k))
        for k_, f_ in zip(k, func(ti, yi)):
            k_.append(f_)

    if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
        # If c_sol matches the last beta row (true for Dormand-Prince), the final
        # stage's `yi` is already the solution, saving a few FLOPs; otherwise
        # combine the stages explicitly.
        yi = tuple(y0_ + _scaled_dot_product(dt, tableau.c_sol, k_) for y0_, k_ in zip(y0, k))

    y1 = yi
    f1 = tuple(k_[-1] for k_ in k)
    y1_error = tuple(_scaled_dot_product(dt, tableau.c_error, k_) for k_ in k)
    return (y1, f1, y1_error, k)
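
A self-contained sketch of the same stage loop with a toy tableau: Heun's 2nd-order method with an embedded Euler solution for the error estimate. ButcherTableau here is an assumed stand-in for the module's _ButcherTableau, and scalar states are used for brevity:

import collections
import torch

ButcherTableau = collections.namedtuple('ButcherTableau', 'alpha beta c_sol c_error')

# Heun's method; c_error = (2nd-order weights) - (1st-order Euler weights).
HEUN = ButcherTableau(alpha=[1.0], beta=[[1.0]], c_sol=[0.5, 0.5], c_error=[-0.5, 0.5])

def rk_step(func, t0, dt, y0, tableau):
    k = [func(t0, y0)]
    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
        yi = y0 + dt * sum(b * k_ for b, k_ in zip(beta_i, k))
        k.append(func(t0 + alpha_i * dt, yi))
    y1 = y0 + dt * sum(c * k_ for c, k_ in zip(tableau.c_sol, k))
    y1_error = dt * sum(c * k_ for c, k_ in zip(tableau.c_error, k))
    return y1, k[-1], y1_error, k

# One step on dy/dt = -y from y(0) = 1.
y1, f1, err, k = rk_step(lambda t, y: -y,
                         torch.tensor(0.0), torch.tensor(0.1), torch.tensor(1.0), HEUN)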
Example #5
    def _adaptive_adams_step(self, vcabm_state, final_t):
        """Take one variable-coefficient Adams (VCABM) step toward `final_t`."""
        y0, prev_f, prev_t, next_t, prev_phi, order = vcabm_state
        if next_t > final_t:
            # Clip so the step does not overshoot the requested end time.
            next_t = final_t
        dt = (next_t - prev_t[0])
        dt_cast = dt.to(y0[0])

        # Explicit predictor step.
        g, phi = g_and_explicit_phi(prev_t, next_t, prev_phi, order)
        g = g.to(y0[0])
        p_next = tuple(
            y0_ + _scaled_dot_product(dt_cast, g[:max(1, order - 1)], phi_[:max(1, order - 1)])
            for y0_, phi_ in zip(y0, tuple(zip(*phi))))

        # Update phi to implicit.
        next_f0 = self.func(next_t.to(p_next[0]), p_next)
        implicit_phi_p = compute_implicit_phi(phi, next_f0, order + 1)

        # Implicit corrector step.
        y_next = tuple(p_next_ + dt_cast * g[order - 1] * iphi_
                       for p_next_, iphi_ in zip(p_next, implicit_phi_p[order - 1]))

        # Error estimation.
        tolerance = tuple(atol_ +
                          rtol_ * torch.max(torch.abs(y0_), torch.abs(y1_))
                          for atol_, rtol_, y0_, y1_ in zip(
                              self.atol, self.rtol, y0, y_next))
        local_error = tuple(dt_cast * (g[order] - g[order - 1]) * iphi_
                            for iphi_ in implicit_phi_p[order])
        error_k = _compute_error_ratio(local_error, tolerance)
        accept_step = (torch.tensor(error_k) <= 1).all()

        if not accept_step:
            # Retry with adjusted step size if step is rejected.
            dt_next = _optimal_step_size(dt,
                                         error_k,
                                         self.safety,
                                         self.ifactor,
                                         self.dfactor,
                                         order=order)
            return _VCABMState(y0,
                               prev_f,
                               prev_t,
                               prev_t[0] + dt_next,
                               prev_phi,
                               order=order)

        # We accept the step. Evaluate f and update phi.
        next_f0 = self.func(next_t.to(p_next[0]), y_next)
        implicit_phi = compute_implicit_phi(phi, next_f0, order + 2)

        next_order = order

        if len(prev_t) <= 4 or order < 3:
            next_order = min(order + 1, 3, self.max_order)
        else:
            error_km1 = _compute_error_ratio(
                tuple(dt_cast * (g[order - 1] - g[order - 2]) * iphi_
                      for iphi_ in implicit_phi_p[order - 1]), tolerance)
            error_km2 = _compute_error_ratio(
                tuple(dt_cast * (g[order - 2] - g[order - 3]) * iphi_
                      for iphi_ in implicit_phi_p[order - 2]), tolerance)
            if min(error_km1 + error_km2) < max(error_k):
                next_order = order - 1
            elif order < self.max_order:
                error_kp1 = _compute_error_ratio(
                    tuple(dt_cast * gamma_star[order] * iphi_
                          for iphi_ in implicit_phi_p[order]), tolerance)
                if max(error_kp1) < max(error_k):
                    next_order = order + 1

        # Keep step size constant if increasing order. Else use adaptive step size.
        dt_next = dt if next_order > order else _optimal_step_size(
            dt,
            error_k,
            self.safety,
            self.ifactor,
            self.dfactor,
            order=order + 1)

        prev_f.appendleft(next_f0)
        prev_t.appendleft(next_t)
        return _VCABMState(y_next,  # advance with the corrected value, not the predictor
                           prev_f,
                           prev_t,
                           next_t + dt_next,
                           implicit_phi,
                           order=next_order)
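
_optimal_step_size implements an elementary step-size controller. A hedged sketch of the textbook rule it follows (the library's exact clamping and tensor-casting details may differ):

def optimal_step_size(dt, error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=1):
    # Grow or shrink dt by error_ratio**(-1/(order+1)), damped by `safety` and
    # clamped to [dfactor, ifactor] so a single step never changes dt too much.
    if error_ratio == 0:
        return dt * ifactor
    factor = min(ifactor, max(dfactor, safety * error_ratio ** (-1.0 / (order + 1))))
    return dt * factor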