Example #1
import numpy as np

xp = np  # assumption: the original project aliases `xp` to NumPy or CuPy


def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):
    """
    Compute Krylov subspace approximation of the matrix exponential
    applied to input vector: `expm(dt*A)*v`.
    `A` must be a Hermitian matrix; it is accessed only through the
    matrix-vector product `Afunc`.
    Reference:
        M. Hochbruck and C. Lubich
        On Krylov subspace approximations to the matrix exponential operator
        SIAM J. Numer. Anal. 34, 1911 (1997)
    """

    # normalize starting vector
    vstart = xp.asarray(vstart)
    nrmv = float(xp.linalg.norm(vstart))
    assert nrmv > 0
    vstart = vstart / nrmv

    # diagonal (alpha) and off-diagonal (beta) entries of the tridiagonal
    # Lanczos matrix
    alpha = np.zeros(block_size)
    beta = np.zeros(block_size - 1)

    # rows of V are the Lanczos basis vectors; the workspace grows block-wise
    V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)
    V[0] = vstart
    res = None

    # Lanczos iteration: at most len(vstart) steps
    for j in range(len(vstart)):

        w = Afunc(V[j])
        alpha[j] = xp.vdot(w, V[j]).real

        if j == len(vstart) - 1:
            # the Krylov subspace equals the full space; the projected
            # problem is exact
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv,
                                dt), j + 1

        if len(V) == j + 1:
            # workspace exhausted: grow V, alpha and beta by another block
            V, old_V = xp.empty((len(V) + block_size, len(vstart)),
                                dtype=vstart.dtype), V
            V[:len(old_V)] = old_V
            del old_V
            alpha = np.concatenate([alpha, np.zeros(block_size)])
            beta = np.concatenate([beta, np.zeros(block_size)])

        # three-term recurrence: orthogonalize w against the two previous
        # Lanczos vectors
        w -= alpha[j] * V[j] + (beta[j - 1] * V[j - 1] if j > 0 else 0)
        beta[j] = xp.linalg.norm(w)
        if beta[j] < 100 * len(vstart) * np.finfo(float).eps:
            # beta[j] ~= 0 signals a Lanczos breakdown: the Krylov subspace
            # is invariant and the current approximation is numerically exact
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv,
                                dt), j + 1

        # from j = 4 on, check convergence every other iteration by comparing
        # with the previous Krylov approximation
        if j > 3 and j % 2 == 0:
            new_res = _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1].T, nrmv,
                                   dt)
            if res is not None and xp.allclose(res, new_res):
                return new_res, j + 1
            else:
                res = new_res
        V[j + 1] = w / beta[j]
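
A minimal usage sketch (hypothetical, not from the source): it assumes the companion `_expm_krylov` helper from the same module is available and that `xp` is NumPy, and applies the routine to a random Hermitian matrix.

import numpy as np

n = 100
M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
H = 0.5 * (M + M.conj().T)   # Hermitian test matrix
v = np.random.randn(n).astype(complex)

w, m = expm_krylov(lambda x: H @ x, 0.01, v)
# w approximates expm(0.01 * H) @ v, built from an m-dimensional Krylov subspace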
Example #2
def _call_impl(self, t):
    # constant interpolant: return the stored value, broadcast over all
    # query times when `t` is an array
    if t.ndim == 0:
        return self.value
    else:
        ret = xp.empty((self.value.shape[0], t.shape[0]))
        ret[:] = self.value[:, None]
        return ret
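
This method resembles the constant dense-output interpolant used by scipy.integrate for degenerate cases (e.g. equal integration limits). A hypothetical, self-contained host class for illustration, assuming everything lives in one module with `xp` bound to NumPy:

import numpy as np

xp = np  # assumed backend alias, as in Example #1

class ConstantDenseOutput:
    """Hypothetical minimal host class: a dense output holding one constant value."""

    def __init__(self, value):
        self.value = np.atleast_1d(value)

    __call__ = _call_impl  # the method shown above

out = ConstantDenseOutput([1.0, 2.0])
print(out(np.float64(0.5)))             # scalar query  -> [1. 2.]
print(out(np.linspace(0, 1, 4)).shape)  # vector query  -> (2, 4)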
Example #3
def prepare_events(events):
    """Standardize event functions and extract is_terminal and direction."""
    if callable(events):
        # a single event function is allowed; wrap it in a tuple
        events = (events,)

    if events is not None:
        is_terminal = xp.empty(len(events), dtype=bool)
        direction = xp.empty(len(events))
        for i, event in enumerate(events):
            # missing attributes default to a non-terminal event that
            # triggers on crossings in either direction
            try:
                is_terminal[i] = event.terminal
            except AttributeError:
                is_terminal[i] = False

            try:
                direction[i] = event.direction
            except AttributeError:
                direction[i] = 0
    else:
        is_terminal = None
        direction = None

    return events, is_terminal, direction
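
A hypothetical usage sketch in the solve_ivp style: event functions are plain callables with optional `terminal` and `direction` attributes (`xp` is again assumed to be NumPy).

import numpy as np

xp = np  # assumed backend alias, as in Example #1

def hit_ground(t, y):        # event fires when the height y[0] crosses zero
    return y[0]

hit_ground.terminal = True   # stop the integration at the event
hit_ground.direction = -1    # trigger only on downward crossings

events, is_terminal, direction = prepare_events(hit_ground)
# events      -> (hit_ground,)
# is_terminal -> array([ True])
# direction   -> array([-1.])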
Example #4
def __init__(self,
             fun,
             t0,
             y0,
             t_bound,
             max_step=xp.inf,
             rtol=1e-3,
             atol=1e-6,
             vectorized=False,
             first_step=None,
             **extraneous):
    warn_extraneous(extraneous)
    super(RungeKutta, self).__init__(fun,
                                     t0,
                                     y0,
                                     t_bound,
                                     vectorized,
                                     support_complex=True)
    self.y_old = None
    self.max_step = validate_max_step(max_step)
    self.rtol, self.atol = validate_tol(rtol, atol, self.n)
    self.f = self.fun(self.t, self.y)  # initial RHS evaluation
    if first_step is None:
        self.h_abs = select_initial_step(
            self.fun,
            self.t,
            self.y,
            self.f,
            self.direction,
            self.order,
            self.rtol,
            self.atol,
        )
    else:
        self.h_abs = validate_first_step(first_step, t0, t_bound)
    # storage for the stage derivatives; the extra row is used by FSAL schemes
    self.K = xp.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
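
This constructor appears to mirror the `RungeKutta` base class in scipy.integrate, which `RK45` and `RK23` subclass. A usage sketch with SciPy's concrete `RK45`:

import numpy as np
from scipy.integrate import RK45

solver = RK45(lambda t, y: -y, t0=0.0, y0=np.array([1.0]), t_bound=5.0)
while solver.status == 'running':
    solver.step()
print(solver.t, solver.y)  # y is close to exp(-5)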
Example #5
import numpy as np
from scipy.sparse import coo_matrix, find

xp = np  # assumed backend alias, as in Example #1

# NUM_JAC_* are module-level tuning constants; in SciPy's
# scipy/integrate/_ivp/common.py they are powers of the machine epsilon EPS
# (e.g. NUM_JAC_DIFF_REJECT = EPS ** 0.875).


def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    n = y.shape[0]
    n_groups = xp.max(groups) + 1
    # one perturbation vector per column group: columns in the same group do
    # not share rows in the sparsity structure, so they can be perturbed at once
    h_vecs = xp.empty((n_groups, n))
    for group in range(n_groups):
        e = xp.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    # evaluate all perturbed right-hand sides in a single vectorized call
    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    # scatter the grouped differences back onto the sparsity pattern
    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = xp.array(abs(diff).argmax(axis=0)).ravel()
    r = xp.arange(n)
    max_diff = xp.asarray(xp.abs(diff[max_ind, r])).ravel()
    scale = xp.maximum(xp.abs(f[max_ind]), xp.abs(f_new[max_ind, groups[r]]))

    # columns whose largest difference is drowned in round-off are retried
    # with an increased step
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if xp.any(diff_too_small):
        ind, = xp.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = xp.zeros(n)
        h_new_all[ind] = h_new

        groups_unique = xp.unique(groups[ind])
        groups_map = xp.empty(n_groups, dtype=int)
        h_vecs = xp.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = xp.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]], (i, j)),
                              shape=(n, ind.shape[0])).tocsc()

        max_ind_new = xp.array(abs(diff_new).argmax(axis=0)).ravel()
        r = xp.arange(ind.shape[0])
        max_diff_new = xp.asarray(xp.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = xp.maximum(
            xp.abs(f[max_ind_new]),
            xp.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        # keep a retried column only if it improved the relative difference
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if xp.any(update):
            update, = xp.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # turn differences into derivative estimates: divide each column by its step
    diff.data /= xp.repeat(h, xp.diff(diff.indptr))

    # adapt step factors for the next call: grow them where the difference
    # was too small, shrink them where it was too large
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = xp.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
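
A hypothetical sketch of how the `(structure, groups)` pair consumed by this helper can be built, here with SciPy's column-grouping utility:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.optimize._numdiff import group_columns

# tridiagonal Jacobian sparsity pattern, as for a 1-D chain of couplings
n = 6
structure = csc_matrix(np.eye(n) + np.eye(n, k=1) + np.eye(n, k=-1))
groups = np.asarray(group_columns(structure))
# structurally independent columns share a group and are perturbed together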
Example #6
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes a finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error, which
    approximately equals ``EPS * xp.abs(f)``. This reduces the possibility of
    a huge error and ensures that the estimated derivatives are reasonably
    close to the true values (i.e. the finite difference approximation at
    least qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * xp.maximum(xp.abs(y), threshold)``. Typically the value of
        absolute tolerance (atol) for a solver should be passed as `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = xp.asarray(y)
    n = y.shape[0]
    if n == 0:
        return xp.empty((0, 0)), factor

    if factor is None:
        factor = xp.full(n, EPS**0.5)
    else:
        factor = factor.copy()

    # Direct the step as the ODE dictates, hoping that such a step won't lead
    # to a problematic region. For complex ODEs it makes sense to use the real
    # part of f, as we take steps along the real axis.
    f_sign = 2 * (xp.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * xp.maximum(threshold, xp.abs(y))
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. This loop is unlikely
    # to be executed often.
    for i in xp.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure,
                               groups)
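
A hypothetical usage sketch for the dense path. It assumes the companion `_dense_num_jac` helper from the same module, `xp` bound to NumPy, and `EPS` set to the machine epsilon:

import numpy as np

def rhs(t, y):
    # vectorized RHS of a harmonic oscillator: accepts y of shape (n,) or (n, k)
    return np.stack([y[1], -y[0]])

t0 = 0.0
y0 = np.array([1.0, 0.0])
J, factor = num_jac(rhs, t0, y0, rhs(t0, y0), threshold=1e-6, factor=None)
# J approximates [[0, 1], [-1, 0]]; pass `factor` back in on the next call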