def allclose(a, b, rtol=1.0e-5, atol=1.0e-8):
    if isinstance(a, Matrix):
        a = a.array
    else:
        a = xp.asarray(a)
    if isinstance(b, Matrix):
        b = b.array
    else:
        b = xp.asarray(b)
    # delete this when CuPy 6.0 is released
    if xp == cp:
        a = cp.asnumpy(a)
        b = cp.asnumpy(b)
    return np.allclose(a, b, rtol=rtol, atol=atol)
def __init__(self, ts, interpolants):
    ts = xp.asarray(ts)
    d = xp.diff(ts)
    # The first case covers integration on zero segment.
    if not ((ts.size == 2 and ts[0] == ts[-1])
            or xp.all(d > 0) or xp.all(d < 0)):
        raise ValueError("`ts` must be strictly increasing or decreasing.")

    self.n_segments = len(interpolants)
    if ts.shape != (self.n_segments + 1,):
        raise ValueError("Numbers of time stamps and interpolants "
                         "don't match.")

    self.ts = ts
    self.interpolants = interpolants
    if ts[-1] >= ts[0]:
        self.t_min = ts[0]
        self.t_max = ts[-1]
        self.ascending = True
        self.ts_sorted = ts
    else:
        self.t_min = ts[-1]
        self.t_max = ts[0]
        self.ascending = False
        self.ts_sorted = ts[::-1]
def asxp(array: Union[np.ndarray, xp.ndarray, Matrix]) -> xp.ndarray:
    if array is None:
        return None
    if isinstance(array, Matrix):
        array = array.array
    if not USE_GPU:
        assert isinstance(array, np.ndarray)
        return array
    return xp.asarray(array)
def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):
    """
    Compute Krylov subspace approximation of the matrix exponential applied
    to input vector: `expm(dt*A)*v`. A is a hermitian matrix.

    Reference:
        M. Hochbruck and C. Lubich
        On Krylov subspace approximations to the matrix exponential operator
        SIAM J. Numer. Anal. 34, 1911 (1997)
    """
    # normalize starting vector
    vstart = xp.asarray(vstart)
    nrmv = float(xp.linalg.norm(vstart))
    assert nrmv > 0
    vstart = vstart / nrmv

    alpha = np.zeros(block_size)
    beta = np.zeros(block_size - 1)

    V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)
    V[0] = vstart
    res = None

    for j in range(len(vstart)):
        w = Afunc(V[j])
        alpha[j] = xp.vdot(w, V[j]).real

        if j == len(vstart) - 1:
            # logger.debug("the krylov subspace is equal to the full space")
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv, dt), j + 1

        if len(V) == j + 1:
            V, old_V = xp.empty((len(V) + block_size, len(vstart)), dtype=vstart.dtype), V
            V[:len(old_V)] = old_V
            del old_V
            alpha = np.concatenate([alpha, np.zeros(block_size)])
            beta = np.concatenate([beta, np.zeros(block_size)])

        w -= alpha[j] * V[j] + (beta[j - 1] * V[j - 1] if j > 0 else 0)
        beta[j] = xp.linalg.norm(w)
        if beta[j] < 100 * len(vstart) * np.finfo(float).eps:
            # logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv, dt), j + 1

        if 3 < j and j % 2 == 0:
            new_res = _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1].T, nrmv, dt)
            if res is not None and xp.allclose(res, new_res):
                return new_res, j + 1
            else:
                res = new_res
        V[j + 1] = w / beta[j]
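# A minimal, self-contained check of the Lanczos/Krylov approximation built above
# against a dense reference (plain NumPy/SciPy, small Hermitian matrix, fixed subspace
# size rather than the adaptive block growth). Hypothetical helper, not part of the
# module.
def _sketch_krylov_expm_check():
    import numpy as np
    from scipy.linalg import expm, eigh_tridiagonal

    rng = np.random.default_rng(0)
    n, m, dt = 64, 20, 0.1j

    # random Hermitian matrix and starting vector
    A = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
    A = (A + A.conj().T) / 2
    v = rng.standard_normal(n) + 1j * rng.standard_normal(n)

    # plain Lanczos: build the tridiagonal projection of A onto the Krylov
    # subspace spanned by the rows of V
    alpha, beta = np.zeros(m), np.zeros(m - 1)
    V = np.zeros((m, n), dtype=complex)
    V[0] = v / np.linalg.norm(v)
    for j in range(m - 1):
        w = A @ V[j]
        alpha[j] = np.vdot(w, V[j]).real
        w -= alpha[j] * V[j] + (beta[j - 1] * V[j - 1] if j > 0 else 0)
        beta[j] = np.linalg.norm(w)
        V[j + 1] = w / beta[j]
    alpha[m - 1] = np.vdot(A @ V[m - 1], V[m - 1]).real

    # expm(dt*A) v  ~=  ||v|| * V.T @ expm(dt*T) e_1, evaluated via eigh_tridiagonal
    w_t, u_t = eigh_tridiagonal(alpha, beta)
    approx = V.T @ (u_t @ (np.linalg.norm(v) * np.exp(dt * w_t) * u_t[0]))
    exact = expm(dt * A) @ v
    print(np.max(np.abs(approx - exact)))   # small once the subspace has converged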
def _expm_krylov(alpha, beta, V, v_norm, dt):
    # diagonalize Hessenberg matrix
    try:
        w_hess, u_hess = eigh_tridiagonal(alpha, beta)
    except np.linalg.LinAlgError:
        logger.warning("tridiagonal failed")
        h = np.diag(alpha) + np.diag(beta, k=-1) + np.diag(beta, k=1)
        w_hess, u_hess = np.linalg.eigh(h)
    return V @ xp.asarray(u_hess @ (v_norm * np.exp(dt * w_hess) * u_hess[0]))
def find_active_events(g, g_new, direction):
    """Find which event occurred during an integration step.

    Parameters
    ----------
    g, g_new : array_like, shape (n_events,)
        Values of event functions at a current and next points.
    direction : ndarray, shape (n_events,)
        Event "direction" according to the definition in `solve_ivp`.

    Returns
    -------
    active_events : ndarray
        Indices of events which occurred during the step.
    """
    g, g_new = xp.asarray(g), xp.asarray(g_new)
    up = (g <= 0) & (g_new >= 0)
    down = (g >= 0) & (g_new <= 0)
    either = up | down
    mask = (up & (direction > 0)
            | down & (direction < 0)
            | either & (direction == 0))

    return xp.nonzero(mask)[0]
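# A small CPU-only illustration (plain NumPy standing in for xp) of the sign-change
# bookkeeping above; the helper name is hypothetical and not part of the module.
def _sketch_event_crossings():
    import numpy as np

    # three event functions evaluated before and after one step:
    # index 0 crosses zero upward, index 1 downward, index 2 does not cross
    g = np.array([-1.0, 0.5, 2.0])
    g_new = np.array([0.3, -0.2, 1.5])

    up = (g <= 0) & (g_new >= 0)
    down = (g >= 0) & (g_new <= 0)
    either = up | down

    # direction = 0 accepts either crossing, +1 only upward, -1 only downward
    for direction in (np.zeros(3), np.ones(3), -np.ones(3)):
        mask = up & (direction > 0) | down & (direction < 0) | either & (direction == 0)
        print(direction, np.nonzero(mask)[0])
    # [0. 0. 0.] -> [0 1], [1. 1. 1.] -> [0], [-1. -1. -1.] -> [1]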
def validate_tol(rtol, atol, n):
    """Validate tolerance values."""
    if rtol < 100 * EPS:
        warn("`rtol` is too low, setting to {}".format(100 * EPS))
        rtol = 100 * EPS

    atol = xp.asarray(atol)
    if atol.ndim > 0 and atol.shape != (n,):
        raise ValueError("`atol` has wrong shape.")

    if xp.any(atol < 0):
        raise ValueError("`atol` must be positive.")

    return rtol, atol
def __init__(self, array, dtype=None, is_full_mpdm=False):
    assert array is not None
    if dtype == backend.real_dtype:
        # forbid unchecked casting
        assert not xp.iscomplexobj(array)
    if dtype is None:
        if xp.iscomplexobj(array):
            dtype = backend.complex_dtype
        else:
            dtype = backend.real_dtype
    self.array: xp.ndarray = xp.asarray(array, dtype=dtype)
    self.original_shape = self.array.shape
    self.sigmaqn = None
    self.is_full_mpdm = is_full_mpdm
    backend.running = True
def handle_events(sol, events, active_events, is_terminal, t_old, t):
    """Helper function to handle events.

    Parameters
    ----------
    sol : DenseOutput
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    events : list of callables, length n_events
        Event functions with signatures ``event(t, y)``.
    active_events : ndarray
        Indices of events which occurred.
    is_terminal : ndarray, shape (n_events,)
        Which events are terminal.
    t_old, t : float
        Previous and new values of time.

    Returns
    -------
    root_indices : ndarray
        Indices of events which take zero between `t_old` and `t` and before
        a possible termination.
    roots : ndarray
        Values of t at which events occurred.
    terminate : bool
        Whether a terminal event occurred.
    """
    roots = []
    for event_index in active_events:
        roots.append(solve_event_equation(events[event_index], sol, t_old, t))

    roots = xp.asarray(roots)

    if xp.any(is_terminal[active_events]):
        if t > t_old:
            order = xp.argsort(roots)
        else:
            order = xp.argsort(-roots)
        active_events = active_events[order]
        roots = roots[order]
        t = xp.nonzero(is_terminal[active_events])[0][0]
        active_events = active_events[:t + 1]
        roots = roots[:t + 1]
        terminate = True
    else:
        terminate = False

    return active_events, roots, terminate
def __call__(self, t):
    """Evaluate the solution.

    Parameters
    ----------
    t : float or array_like with shape (n_points,)
        Points to evaluate at.

    Returns
    -------
    y : ndarray, shape (n_states,) or (n_states, n_points)
        Computed values. Shape depends on whether `t` is a scalar or a
        1-d array.
    """
    t = xp.asarray(t)

    if t.ndim == 0:
        return self._call_single(t)

    order = xp.argsort(t)
    reverse = xp.empty_like(order)
    reverse[order] = xp.arange(order.shape[0])
    t_sorted = t[order]

    # See comment in self._call_single.
    if self.ascending:
        segments = xp.searchsorted(self.ts_sorted, t_sorted, side="left")
    else:
        segments = xp.searchsorted(self.ts_sorted, t_sorted, side="right")
    segments -= 1
    segments[segments < 0] = 0
    segments[segments > self.n_segments - 1] = self.n_segments - 1
    if not self.ascending:
        segments = self.n_segments - 1 - segments

    ys = []
    group_start = 0
    for segment, group in groupby(segments):
        group_end = group_start + len(list(group))
        y = self.interpolants[segment](t_sorted[group_start:group_end])
        ys.append(y)
        group_start = group_end

    ys = xp.hstack(ys)
    ys = ys[:, reverse]

    return ys
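# Usage sketch for dense-output evaluation. This class mirrors SciPy's OdeSolution;
# the example below uses scipy.integrate.solve_ivp on the CPU rather than this
# module's GPU path, and the helper name is hypothetical.
def _sketch_dense_output_eval():
    import numpy as np
    from scipy.integrate import solve_ivp

    # dense_output=True makes the solver return an OdeSolution in sol.sol
    sol = solve_ivp(lambda t, y: -0.5 * y, (0.0, 10.0), [2.0], dense_output=True)

    t_query = np.array([7.5, 0.3, 3.3])   # unsorted query times are allowed
    y_query = sol.sol(t_query)            # shape (n_states, n_points), original order
    print(y_query.shape, y_query[0])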
def __call__(self, t):
    """Evaluate the interpolant.

    Parameters
    ----------
    t : float or array_like with shape (n_points,)
        Points to evaluate the solution at.

    Returns
    -------
    y : ndarray, shape (n,) or (n, n_points)
        Computed values. Shape depends on whether `t` was a scalar or a
        1-d array.
    """
    t = xp.asarray(t)
    if t.ndim > 1:
        raise ValueError("`t` must be float or 1-d array.")
    return self._call_impl(t)
def expm_krylov(Afunc, dt, vstart):
    """
    Compute Krylov subspace approximation of the matrix exponential applied
    to input vector: `expm(dt*A)*v`.

    Reference:
        M. Hochbruck and C. Lubich
        On Krylov subspace approximations to the matrix exponential operator
        SIAM J. Numer. Anal. 34, 1911 (1997)
    """
    # normalize starting vector
    vstart = xp.asarray(vstart)
    nrmv = xp.linalg.norm(vstart)
    assert nrmv > 0
    vstart = vstart / nrmv

    # max iteration
    MAX_ITER = 50
    alpha = np.zeros(MAX_ITER)
    beta = np.zeros(MAX_ITER - 1)

    V = xp.zeros((MAX_ITER, len(vstart)), dtype=vstart.dtype)
    V[0] = vstart
    res = None

    for j in range(len(vstart) - 1):
        if MAX_ITER - 1 == j:
            raise RuntimeError("krylov not converged")
        w = Afunc(V[j])
        alpha[j] = xp.vdot(w, V[j]).real
        w -= alpha[j] * V[j] + (beta[j - 1] * V[j - 1] if j > 0 else 0)
        beta[j] = xp.linalg.norm(w)
        if beta[j] < 100 * len(vstart) * np.finfo(float).eps:
            logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv, dt)
        if 3 < j and j % 2 == 0:
            new_res = _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1].T, nrmv, dt)
            if res is not None and xp.allclose(res, new_res):
                return new_res
            else:
                res = new_res
        V[j + 1] = w / beta[j]
    return _expm_krylov(alpha, beta, V.T, nrmv, dt)
def check_arguments(fun, y0, support_complex):
    """Helper function for checking arguments common to all solvers."""
    y0 = xp.asarray(y0)
    # dtype casting is not necessary in tdvp
    """
    if xp.issubdtype(y0.dtype, xp.complexfloating):
        if not support_complex:
            raise ValueError("`y0` is complex, but the chosen solver does "
                             "not support integration in a complex domain.")
        dtype = complex
    else:
        dtype = float
    y0 = y0.astype(dtype, copy=False)
    """

    if y0.ndim != 1:
        raise ValueError("`y0` must be 1-dimensional.")

    return fun, y0
def _evolve_dmrg_tdvp_mctdh(self, mpo, evolve_dt) -> "Mps":
    # TDVP for original MCTDH
    if self.is_right_canon:
        assert self.check_right_canonical()
        self.canonicalise()

    # a workaround for https://github.com/scipy/scipy/issues/10164
    imag_time = np.iscomplex(evolve_dt)
    if imag_time:
        evolve_dt = -evolve_dt.imag
        # used in calculating derivatives
        coef = -1
    else:
        coef = 1j

    # qn for this method has not been implemented
    self.use_dummy_qn = True
    self.clear_qn()
    mps = self.to_complex(inplace=True)
    mps_conj = mps.conj()

    environ = Environ()
    environ.construct(mps, mps_conj, mpo, "R")

    # initial matrix
    ltensor = np.ones((1, 1, 1))
    rtensor = np.ones((1, 1, 1))

    new_mps = self.metacopy()

    cmf_rk_steps = []

    for imps in range(len(mps)):
        ltensor = environ.GetLR(
            "L", imps - 1, mps, mps_conj, mpo, itensor=ltensor, method="System"
        )
        rtensor = environ.GetLR(
            "R", imps + 1, mps, mps_conj, mpo, itensor=rtensor, method="Enviro"
        )

        # density matrix
        S = transferMat(mps, mps_conj, "R", imps + 1).asnumpy()

        epsilon = 1e-8
        w, u = scipy.linalg.eigh(S)
        try:
            w = w + epsilon * np.exp(-w / epsilon)
        except FloatingPointError:
            logger.warning("eigenvalue of density matrix contains negative value")
            w -= 2 * w.min()
            w = w + epsilon * np.exp(-w / epsilon)
        # print("sum w=", np.sum(w))
        # S = u.dot(np.diag(w)).dot(np.conj(u.T))

        S_inv = xp.asarray(u.dot(np.diag(1.0 / w)).dot(np.conj(u.T)))

        # pseudo inverse
        # S_inv = scipy.linalg.pinvh(S, rcond=1e-2)

        shape = mps[imps].shape

        hop = hop_factory(ltensor, rtensor, mpo[imps], len(shape))

        func = integrand_func_factory(shape, hop, imps == len(mps) - 1, S_inv, coef)

        sol = solve_ivp(
            func, (0, evolve_dt), mps[imps].ravel().array, method="RK45"
        )
        # print("CMF steps:", len(sol.t))
        cmf_rk_steps.append(len(sol.t))
        new_mps[imps] = sol.y[:, -1].reshape(shape)
        new_mps[imps].check_lortho()
        # print("orthogonal1", np.allclose(np.tensordot(MPSnew[imps],
        #       np.conj(MPSnew[imps]), axes=([0, 1], [0, 1])),
        #       np.diag(np.ones(MPSnew[imps].shape[2]))))

    steps_stat = stats.describe(cmf_rk_steps)
    logger.debug(f"TDVP-MCTDH CMF steps: {steps_stat}")

    return new_mps
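# The `w + epsilon * exp(-w / epsilon)` step above regularizes near-singular
# density-matrix eigenvalues before inversion. A standalone illustration with plain
# NumPy and made-up eigenvalues (hypothetical helper, not part of the module):
def _sketch_density_matrix_regularization():
    import numpy as np

    epsilon = 1e-8
    # typical reduced-density-matrix spectrum: a few dominant weights plus
    # numerically tiny ones that would blow up a plain inverse
    w = np.array([9.0e-1, 1.0e-1, 1.0e-12, 0.0])

    w_reg = w + epsilon * np.exp(-w / epsilon)
    print(1.0 / w_reg)
    # large eigenvalues are essentially untouched (exp(-w/eps) ~ 0) while tiny
    # ones are floored near epsilon, so 1/w_reg stays bounded by roughly 1/epsilon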
def astype(self, dtype):
    assert not (self.dtype == backend.complex_dtype and dtype == backend.real_dtype)
    self.array = xp.asarray(self.array, dtype=dtype)
    return self
def solve_ivp(fun, t_span, y0, method="RK45", t_eval=None, dense_output=False,
              events=None, vectorized=False, **options) -> OdeResult:
    """Solve an initial value problem for a system of ODEs.

    This function numerically integrates a system of ordinary differential
    equations given an initial value::

        dy / dt = f(t, y)
        y(t0) = y0

    Here t is a one-dimensional independent variable (time), y(t) is an
    n-dimensional vector-valued function (state), and an n-dimensional
    vector-valued function f(t, y) determines the differential equations.
    The goal is to find y(t) approximately satisfying the differential
    equations, given an initial value y(t0)=y0.

    Some of the solvers support integration in the complex domain, but note
    that for stiff ODE solvers, the right-hand side must be
    complex-differentiable (satisfy Cauchy-Riemann equations [11]_). To solve
    a problem in the complex domain, pass y0 with a complex data type. Another
    option is always to rewrite your problem for real and imaginary parts
    separately.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray
        ``y``: It can either have shape (n,); then ``fun`` must return
        array_like with shape (n,). Alternatively it can have shape (n, k);
        then ``fun`` must return an array_like with shape (n, k), i.e. each
        column corresponds to a single column in ``y``. The choice between the
        two options is determined by `vectorized` argument (see below). The
        vectorized implementation allows a faster approximation of the
        Jacobian by finite differences (required for stiff solvers).
    t_span : 2-tuple of floats
        Interval of integration (t0, tf). The solver starts with t=t0 and
        integrates until it reaches t=tf.
    y0 : array_like, shape (n,)
        Initial state. For problems in the complex domain, pass `y0` with a
        complex data type (even if the initial guess is purely real).
    method : string or `OdeSolver`, optional
        Integration method to use:

            * 'RK45' (default): Explicit Runge-Kutta method of order 5(4)
              [1]_. The error is controlled assuming accuracy of the
              fourth-order method, but steps are taken using the fifth-order
              accurate formula (local extrapolation is done). A quartic
              interpolation polynomial is used for the dense output [2]_.
              Can be applied in the complex domain.
            * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The
              error is controlled assuming accuracy of the second-order
              method, but steps are taken using the third-order accurate
              formula (local extrapolation is done). A cubic Hermite
              polynomial is used for the dense output. Can be applied in the
              complex domain.
            * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
              order 5 [4]_. The error is controlled with a third-order
              accurate embedded formula. A cubic polynomial which satisfies
              the collocation conditions is used for the dense output.
            * 'BDF': Implicit multi-step variable-order (1 to 5) method based
              on a backward differentiation formula for the derivative
              approximation [5]_. The implementation follows the one described
              in [6]_. A quasi-constant step scheme is used and accuracy is
              enhanced using the NDF modification. Can be applied in the
              complex domain.
            * 'LSODA': Adams/BDF method with automatic stiffness detection and
              switching [7]_, [8]_. This is a wrapper of the Fortran solver
              from ODEPACK.

        You should use the 'RK45' or 'RK23' method for non-stiff problems and
        'Radau' or 'BDF' for stiff problems [9]_. If not sure, first try to
        run 'RK45'.
        If it needs unusually many iterations, diverges, or fails, your
        problem is likely to be stiff and you should use 'Radau' or 'BDF'.
        'LSODA' can also be a good universal choice, but it might be somewhat
        less convenient to work with as it wraps old Fortran code.

        You can also pass an arbitrary class derived from `OdeSolver` which
        implements the solver.
    dense_output : bool, optional
        Whether to compute a continuous solution. Default is False.
    t_eval : array_like or None, optional
        Times at which to store the computed solution, must be sorted and lie
        within `t_span`. If None (default), use points selected by the solver.
    events : callable, list of callables or None, optional
        Types of events to track. Each is defined by a continuous function of
        time and state that becomes zero value in case of an event. Each
        function must have the signature ``event(t, y)`` and return a float.
        The solver will find an accurate value of ``t`` at which
        ``event(t, y(t)) = 0`` using a root-finding algorithm. Additionally
        each ``event`` function might have the following attributes:

            * terminal: bool, whether to terminate integration if this event
              occurs. Implicitly False if not assigned.
            * direction: float, direction of a zero crossing. If `direction`
              is positive, `event` must go from negative to positive, and vice
              versa if `direction` is negative. If 0, then either direction
              will count. Implicitly 0 if not assigned.

        You can assign attributes like ``event.terminal = True`` to any
        function in Python. If None (default), events won't be tracked.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is
        False.
    options
        Options passed to a chosen solver. All options available for already
        implemented solvers are listed below.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is xp.inf, i.e. the step size is
        not bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits). But if a component of
        `y` is approximately below `atol`, the error only needs to fall within
        the same `atol` threshold, and the number of correct digits is not
        guaranteed. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by the 'Radau', 'BDF' and 'LSODA' method. The Jacobian
        matrix has shape (n, n) and its element (i, j) is equal to
        ``d f_i / d y_j``. There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to be
              constant. Not supported by 'LSODA'.
            * If callable, the Jacobian is assumed to depend on both t and y;
              it will be called as ``jac(t, y)`` as necessary. For the 'Radau'
              and 'BDF' methods, the return value might be a sparse matrix.
            * If None (default), the Jacobian will be approximated by finite
              differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This
        argument is ignored if `jac` is not `None`. If the Jacobian has only
        few non-zero elements in *each* row, providing the sparsity structure
        will greatly speed up the computations [10]_. A zero entry means that
        a corresponding element in the Jacobian is always zero. If None
        (default), the Jacobian is assumed to be dense.
        Not supported by 'LSODA', see `lband` and `uband` instead.
    lband, uband : int or None
        Parameters defining the bandwidth of the Jacobian for the 'LSODA'
        method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
        Setting these requires your jac routine to return the Jacobian in the
        packed format: the returned array must have ``n`` columns and
        ``uband + lband + 1`` rows in which Jacobian diagonals are written.
        Specifically ``jac_packed[uband + i - j, j] = jac[i, j]``. The same
        format is used in `scipy.linalg.solve_banded` (check for an
        illustration). These parameters can be also used with ``jac=None`` to
        reduce the number of Jacobian elements estimated by finite
        differences.
    min_step : float, optional
        The minimum allowed step size for 'LSODA' method.
        By default `min_step` is zero.

    Returns
    -------
    Bunch object with the following fields defined:
    t : ndarray, shape (n_points,)
        Time points.
    y : ndarray, shape (n, n_points)
        Values of the solution at `t`.
    sol : `OdeSolution` or None
        Found solution as `OdeSolution` instance; None if `dense_output` was
        set to False.
    t_events : list of ndarray or None
        Contains for each event type a list of arrays at which an event of
        that type was detected. None if `events` was None.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.
    status : int
        Reason for algorithm termination:

            * -1: Integration step failed.
            * 0: The solver successfully reached the end of `tspan`.
            * 1: A termination event occurred.

    message : string
        Human-readable description of the termination reason.
    success : bool
        True if the solver reached the interval end or a termination event
        occurred (``status >= 0``).

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [5] `Backward Differentiation Formula
           <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
           on Wikipedia.
    .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J.
           SCI. COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp.
           136-148, 1983.
    .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
           Wikipedia.
    .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
            sparse Jacobian matrices", Journal of the Institute of Mathematics
            and its Applications, 13, pp. 117-120, 1974.
Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. .. [11] `Cauchy-Riemann equations <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on Wikipedia. Examples -------- Basic exponential decay showing automatically chosen time points. >>> from scipy.integrate import solve_ivp >>> def exponential_decay(t, y): return -0.5 * y >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8]) >>> print(sol.t) [ 0. 0.11487653 1.26364188 3.06061781 4.85759374 6.65456967 8.4515456 10. ] >>> print(sol.y) [[2. 1.88836035 1.06327177 0.43319312 0.17648948 0.0719045 0.02929499 0.01350938] [4. 3.7767207 2.12654355 0.86638624 0.35297895 0.143809 0.05858998 0.02701876] [8. 7.5534414 4.25308709 1.73277247 0.7059579 0.287618 0.11717996 0.05403753]] Specifying points where the solution is desired. >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8], ... t_eval=[0, 1, 2, 4, 10]) >>> print(sol.t) [ 0 1 2 4 10] >>> print(sol.y) [[2. 1.21305369 0.73534021 0.27066736 0.01350938] [4. 2.42610739 1.47068043 0.54133472 0.02701876] [8. 4.85221478 2.94136085 1.08266944 0.05403753]] Cannon fired upward with terminal event upon impact. The ``terminal`` and ``direction`` fields of an event are applied by monkey patching a function. Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts at position 0 with velocity +10. Note that the integration never reaches t=100 because the event is terminal. >>> def upward_cannon(t, y): return [y[1], -0.5] >>> def hit_ground(t, y): return y[1] >>> hit_ground.terminal = True >>> hit_ground.direction = -1 >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground) >>> print(sol.t_events) [array([ 20.])] >>> print(sol.t) [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 1.11088891e-01 1.11098890e+00 1.11099890e+01 2.00000000e+01] """ if method not in METHODS and not (inspect.isclass(method) and issubclass(method, OdeSolver)): raise ValueError( "`method` must be one of {} or OdeSolver class.".format(METHODS)) t0, tf = float(t_span[0]), float(t_span[1]) if t_eval is not None: t_eval = xp.asarray(t_eval) if t_eval.ndim != 1: raise ValueError("`t_eval` must be 1-dimensional.") if xp.any(t_eval < min(t0, tf)) or xp.any(t_eval > max(t0, tf)): raise ValueError("Values in `t_eval` are not within `t_span`.") d = xp.diff(t_eval) if tf > t0 and xp.any(d <= 0) or tf < t0 and xp.any(d >= 0): raise ValueError("Values in `t_eval` are not properly sorted.") if tf > t0: t_eval_i = 0 else: # Make order of t_eval decreasing to use xp.searchsorted. t_eval = t_eval[::-1] # This will be an upper bound for slices. 
            t_eval_i = t_eval.shape[0]

    if method in METHODS:
        method = METHODS[method]

    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)

    if t_eval is None:
        ts = [t0]
        ys = [y0]
    elif t_eval is not None and dense_output:
        ts = []
        ti = [t0]
        ys = []
    else:
        ts = []
        ys = []

    interpolants = []

    events, is_terminal, event_dir = prepare_events(events)

    if events is not None:
        g = [event(t0, y0) for event in events]
        t_events = [[] for _ in range(len(events))]
    else:
        t_events = None

    status = None

    while status is None:
        message = solver.step()

        if solver.status == "finished":
            status = 0
        elif solver.status == "failed":
            status = -1
            break

        t_old = solver.t_old
        t = solver.t
        y = solver.y

        if dense_output:
            sol = solver.dense_output()
            interpolants.append(sol)
        else:
            sol = None

        if events is not None:
            g_new = [event(t, y) for event in events]
            active_events = find_active_events(g, g_new, event_dir)
            if active_events.size > 0:
                if sol is None:
                    sol = solver.dense_output()

                root_indices, roots, terminate = handle_events(
                    sol, events, active_events, is_terminal, t_old, t)

                for e, te in zip(root_indices, roots):
                    t_events[e].append(te)

                if terminate:
                    status = 1
                    t = roots[-1]
                    y = sol(t)

            g = g_new

        if t_eval is None:
            ts.append(t)
            ys.append(y)
        else:
            # The value in t_eval equal to t will be included.
            if solver.direction > 0:
                t_eval_i_new = xp.searchsorted(t_eval, t, side="right")
                t_eval_step = t_eval[t_eval_i:t_eval_i_new]
            else:
                t_eval_i_new = xp.searchsorted(t_eval, t, side="left")
                # It has to be done with two slice operations, because
                # you can't slice to 0-th element inclusive using backward
                # slicing.
                t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]

            if t_eval_step.size > 0:
                if sol is None:
                    sol = solver.dense_output()
                ts.append(t_eval_step)
                ys.append(sol(t_eval_step))
                t_eval_i = t_eval_i_new

        if t_eval is not None and dense_output:
            ti.append(t)

    message = MESSAGES.get(status, message)

    if t_events is not None:
        t_events = [xp.asarray(te) for te in t_events]

    if t_eval is None:
        ts = xp.array(ts)
        ys = xp.vstack(ys).T
    else:
        ts = xp.hstack(ts)
        ys = xp.hstack(ys)

    if dense_output:
        if t_eval is None:
            sol = OdeSolution(ts, interpolants)
        else:
            sol = OdeSolution(ti, interpolants)
    else:
        sol = None

    return OdeResult(
        t=ts,
        y=ys,
        sol=sol,
        t_events=t_events,
        nfev=solver.nfev,
        njev=solver.njev,
        nlu=solver.nlu,
        status=status,
        message=message,
        success=status >= 0,
    )
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error which
    approximately equals ``EPS * xp.abs(f)``. It reduces a possibility of a
    huge error and assures that the estimated derivatives are reasonably
    close to the true values (i.e. the finite difference approximation at
    least qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * xp.maximum(xp.abs(y), threshold)``. Typically the value of
        absolute tolerance (atol) for a solver should be passed as
        `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = xp.asarray(y)
    n = y.shape[0]
    if n == 0:
        return xp.empty((0, 0)), factor

    if factor is None:
        factor = xp.full(n, EPS ** 0.5)
    else:
        factor = factor.copy()

    # Direct the step as ODE dictates, hoping that such a step won't lead to
    # a problematic region. For complex ODEs it makes sense to use the real
    # part of f as we use steps along real axis.
    f_sign = 2 * (xp.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * xp.maximum(threshold, xp.abs(y))
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. Not likely it will be
    # executed often.
    for i in xp.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
                               structure, groups)
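# A standalone check (plain NumPy, two-state toy system, no vectorization) of the
# forward-difference scheme num_jac implements, using the same representable-step
# rule h = (y + factor * y_scale) - y. Hypothetical helper, not part of the module.
def _sketch_forward_difference_jacobian():
    import numpy as np

    def fun(t, y):
        # toy right-hand side: f0 = -0.5*y0*y1, f1 = y0**2
        return np.array([-0.5 * y[0] * y[1], y[0] ** 2])

    t, y = 0.0, np.array([1.0, 2.0])
    f = fun(t, y)

    eps = np.finfo(float).eps
    factor = np.full(y.size, eps ** 0.5)
    threshold = 1e-6
    f_sign = 2 * (f >= 0).astype(float) - 1
    y_scale = f_sign * np.maximum(threshold, np.abs(y))
    h = (y + factor * y_scale) - y      # the step actually representable in floats

    # column j of the Jacobian: (f(y + h_j * e_j) - f(y)) / h_j
    J = np.empty((y.size, y.size))
    for j in range(y.size):
        y_step = y.copy()
        y_step[j] += h[j]
        J[:, j] = (fun(t, y_step) - f) / h[j]

    J_exact = np.array([[-0.5 * y[1], -0.5 * y[0]],
                        [2.0 * y[0], 0.0]])
    print(np.max(np.abs(J - J_exact)))  # ~1e-8 with the sqrt(eps) factor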
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    n = y.shape[0]
    n_groups = xp.max(groups) + 1
    h_vecs = xp.empty((n_groups, n))
    for group in range(n_groups):
        e = xp.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = xp.array(abs(diff).argmax(axis=0)).ravel()
    r = xp.arange(n)
    max_diff = xp.asarray(xp.abs(diff[max_ind, r])).ravel()
    scale = xp.maximum(xp.abs(f[max_ind]),
                       xp.abs(f_new[max_ind, groups[r]]))

    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if xp.any(diff_too_small):
        ind, = xp.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = xp.zeros(n)
        h_new_all[ind] = h_new

        groups_unique = xp.unique(groups[ind])
        groups_map = xp.empty(n_groups, dtype=int)
        h_vecs = xp.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = xp.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()

        max_ind_new = xp.array(abs(diff_new).argmax(axis=0)).ravel()
        r = xp.arange(ind.shape[0])
        max_diff_new = xp.asarray(xp.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = xp.maximum(
            xp.abs(f[max_ind_new]),
            xp.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if xp.any(update):
            update, = xp.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    diff.data /= xp.repeat(h, xp.diff(diff.indptr))

    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = xp.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor