def _check(module, module_name, ext):
  if module is None:
    raise errors.PackageMissingError(
      '"{package}" must be installed when you want to save/load data with {ext} '
      'format. \nPlease install {package} through "pip install {package}" or '
      '"conda install {package}".'.format(package=module_name, ext=ext)
    )
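# Usage sketch for `_check` (hedged: the `h5py` dependency below is purely
# illustrative and not from the source; any optional module guarded with a
# try/except import works the same way).
try:
  import h5py
except ModuleNotFoundError:
  h5py = None

_check(h5py, 'h5py', '.h5')  # raises PackageMissingError when h5py is absent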
def solve(self, diff_eq, var):
  if analysis_by_sympy is None or sympy is None:
    raise errors.PackageMissingError(
      f'Package "sympy" must be installed when the users '
      f'want to utilize {ExponentialEuler.__name__}. ')

  f_expressions = diff_eq.get_f_expressions(substitute_vars=diff_eq.var_name)

  # code lines
  self.code_lines.extend([f"  {str(expr)}" for expr in f_expressions[:-1]])

  # get the linear system using sympy
  f_res = f_expressions[-1]
  if len(f_res.code) > 500:
    raise errors.DiffEqError(
      f'Too complex differential equation:\n\n'
      f'{f_res.code}\n\n'
      f'SymPy cannot analyze it. Please use {ExpEulerAuto} for '
      f'Exponential Euler integration instead, because it is capable of '
      f'performing automatic differentiation.')
  df_expr = analysis_by_sympy.str2sympy(f_res.code).expr.expand()
  s_df = sympy.Symbol(f"{f_res.var_name}")
  self.code_lines.append(f'  {s_df.name} = {analysis_by_sympy.sympy2str(df_expr)}')

  # get df part
  s_linear = sympy.Symbol(f'_{diff_eq.var_name}_linear')
  s_linear_exp = sympy.Symbol(f'_{diff_eq.var_name}_linear_exp')
  s_df_part = sympy.Symbol(f'_{diff_eq.var_name}_df_part')
  if df_expr.has(var):
    # linear part
    linear = sympy.diff(df_expr, var, evaluate=True)
    # TODO: linear may contain unknown symbols
    self.code_lines.append(f'  {s_linear.name} = {analysis_by_sympy.sympy2str(linear)}')
    # linear exponential
    self.code_lines.append(f'  {s_linear_exp.name} = math.exp({s_linear.name} * {C.DT})')
    # df part
    df_part = (s_linear_exp - 1) / s_linear * s_df
    self.code_lines.append(f'  {s_df_part.name} = {analysis_by_sympy.sympy2str(df_part)}')
  else:
    # df part
    self.code_lines.append(f'  {s_df_part.name} = {s_df.name} * {C.DT}')
  return s_df_part
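# What `solve` generates, in closed form: for dx/dt = f(x) with linear
# coefficient a = df/dx, one exponential Euler step is
#   x_new = x + (exp(a * dt) - 1) / a * f(x).
# A self-contained numeric sketch for dV/dt = -V/tau + I (all names here
# are illustrative, not from the source):
import math

tau, I, dt = 10.0, 1.0, 0.1
f = lambda V: -V / tau + I
a = -1.0 / tau                                   # linear coefficient df/dV
V = 0.0
V_new = V + (math.exp(a * dt) - 1.0) / a * f(V)  # one exponential Euler step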
def symbolic_build(self):
  if self.var_type == constants.SYSTEM_VAR:
    raise errors.IntegratorError(
      f'Exponential Euler method does not support {self.var_type} variable type.')
  if self.intg_type != constants.ITO_SDE:
    raise errors.IntegratorError(
      f'Exponential Euler method only supports the Ito integral, but we got {self.intg_type}.')
  if sympy is None or analysis_by_sympy is None:
    raise errors.PackageMissingError('SymPy must be installed when '
                                     'using exponential integrators.')

  # check bound method
  if hasattr(self.derivative[constants.F], '__self__'):
    self.code_lines = [f'def {self.func_name}({", ".join(["self"] + list(self.arguments))}):']

  # 1. code scope
  closure_vars = inspect.getclosurevars(self.derivative[constants.F])
  self.code_scope.update(closure_vars.nonlocals)
  self.code_scope.update(dict(closure_vars.globals))
  self.code_scope['math'] = math

  # 2. code lines
  code_lines = self.code_lines
  # code_lines = [f'def {self.func_name}({", ".join(self.arguments)}):']
  code_lines.append(f'  {constants.DT}_sqrt = {constants.DT} ** 0.5')

  # 2.1 dg
  # dg = g(x, t, *args)
  all_dg = [f'{var}_dg' for var in self.variables]
  code_lines.append(f'  {", ".join(all_dg)} = g({", ".join(self.variables + self.parameters)})')
  code_lines.append('  ')

  # 2.2 dW
  noise_terms(code_lines, self.variables)

  # 2.3 dgdW
  # ----
  # SCALAR_WIENER : dg * dW
  # VECTOR_WIENER : math.sum(dg * dW, axis=-1)
  if self.wiener_type == constants.SCALAR_WIENER:
    for var in self.variables:
      code_lines.append(f'  {var}_dgdW = {var}_dg * {var}_dW')
  else:
    for var in self.variables:
      code_lines.append(f'  {var}_dgdW = math.sum({var}_dg * {var}_dW, axis=-1)')
  code_lines.append('  ')

  # 2.4 new var
  # ----
  analysis = separate_variables(self.derivative[constants.F])
  variables_for_returns = analysis['variables_for_returns']
  expressions_for_returns = analysis['expressions_for_returns']
  for vi, (key, vars) in enumerate(variables_for_returns.items()):
    # separate variables
    sd_variables = []
    for v in vars:
      if len(v) > 1:
        raise ValueError('Cannot analyze multi-assignment code line.')
      sd_variables.append(v[0])
    expressions = expressions_for_returns[key]
    var_name = self.variables[vi]
    diff_eq = analysis_by_sympy.SingleDiffEq(var_name=var_name,
                                             variables=sd_variables,
                                             expressions=expressions,
                                             derivative_expr=key,
                                             scope=self.code_scope,
                                             func_name=self.func_name)

    f_expressions = diff_eq.get_f_expressions(substitute_vars=diff_eq.var_name)

    # code lines
    code_lines.extend([f"  {str(expr)}" for expr in f_expressions[:-1]])

    # get the linear system using sympy
    f_res = f_expressions[-1]
    df_expr = analysis_by_sympy.str2sympy(f_res.code).expr.expand()
    s_df = sympy.Symbol(f"{f_res.var_name}")
    code_lines.append(f'  {s_df.name} = {analysis_by_sympy.sympy2str(df_expr)}')
    var = sympy.Symbol(diff_eq.var_name, real=True)

    # get df part
    s_linear = sympy.Symbol(f'_{diff_eq.var_name}_linear')
    s_linear_exp = sympy.Symbol(f'_{diff_eq.var_name}_linear_exp')
    s_df_part = sympy.Symbol(f'_{diff_eq.var_name}_df_part')
    if df_expr.has(var):
      # linear part
      linear = sympy.collect(df_expr, var, evaluate=False)[var]
      code_lines.append(f'  {s_linear.name} = {analysis_by_sympy.sympy2str(linear)}')
      # linear exponential
      code_lines.append(f'  {s_linear_exp.name} = math.exp({analysis_by_sympy.sympy2str(linear)} * {constants.DT})')
      # df part
      df_part = (s_linear_exp - 1) / s_linear * s_df
      code_lines.append(f'  {s_df_part.name} = {analysis_by_sympy.sympy2str(df_part)}')
    else:
      # linear exponential
      code_lines.append(f'  {s_linear_exp.name} = {constants.DT}_sqrt')
      # df part
      code_lines.append(f'  {s_df_part.name} = {s_df.name} * {constants.DT}')

    # update expression
    update = var + s_df_part

    # The actual update step
    code_lines.append(f'  {diff_eq.var_name}_new = {analysis_by_sympy.sympy2str(update)} + {var_name}_dgdW')
    code_lines.append('')

  # returns
  new_vars = [f'{var}_new' for var in self.variables]
  code_lines.append(f'  return {", ".join(new_vars)}')

  # return and compile
  self.integral = utils.compile_code(
    code_scope={k: v for k, v in self.code_scope.items()},
    code_lines=self.code_lines,
    show_code=self.show_code,
    func_name=self.func_name)

  if hasattr(self.derivative[constants.F], '__self__'):
    host = self.derivative[constants.F].__self__
    self.integral = self.integral.__get__(host, host.__class__)
def exp_euler_wrapper(f, show_code, dt, var_type, im_return):
  try:
    import sympy
    from brainpy.integrators import sympy_analysis
  except ModuleNotFoundError:
    raise errors.PackageMissingError(
      'SymPy must be installed when using exponential Euler methods.')

  if var_type == constants.SYSTEM_VAR:
    raise errors.IntegratorError(
      f'Exponential Euler method does not support {var_type} variable type.')

  dt_var = 'dt'
  class_kw, variables, parameters, arguments = utils.get_args(f)
  func_name = Tools.f_names(f)

  code_lines = [f'def {func_name}({", ".join(arguments)}):']

  # code scope
  closure_vars = inspect.getclosurevars(f)
  code_scope = dict(closure_vars.nonlocals)
  code_scope.update(dict(closure_vars.globals))
  code_scope[dt_var] = dt
  code_scope['f'] = f
  code_scope['exp'] = ops.exp

  analysis = separate_variables(f)
  variables_for_returns = analysis['variables_for_returns']
  expressions_for_returns = analysis['expressions_for_returns']

  for vi, (key, vars) in enumerate(variables_for_returns.items()):
    # separate variables
    sd_variables = []
    for v in vars:
      if len(v) > 1:
        raise ValueError('Cannot analyze multi-assignment code line.')
      sd_variables.append(v[0])
    expressions = expressions_for_returns[key]
    var_name = variables[vi]
    diff_eq = sympy_analysis.SingleDiffEq(var_name=var_name,
                                          variables=sd_variables,
                                          expressions=expressions,
                                          derivative_expr=key,
                                          scope=code_scope,
                                          func_name=func_name)

    f_expressions = diff_eq.get_f_expressions(substitute_vars=diff_eq.var_name)

    # code lines
    code_lines.extend([f"  {str(expr)}" for expr in f_expressions[:-1]])

    # get the linear system using sympy
    f_res = f_expressions[-1]
    df_expr = sympy_analysis.str2sympy(f_res.code).expr.expand()
    s_df = sympy.Symbol(f"{f_res.var_name}")
    code_lines.append(f'  {s_df.name} = {sympy_analysis.sympy2str(df_expr)}')
    var = sympy.Symbol(diff_eq.var_name, real=True)

    # get df part
    s_linear = sympy.Symbol(f'_{diff_eq.var_name}_linear')
    s_linear_exp = sympy.Symbol(f'_{diff_eq.var_name}_linear_exp')
    s_df_part = sympy.Symbol(f'_{diff_eq.var_name}_df_part')
    if df_expr.has(var):
      # linear part
      linear = sympy.collect(df_expr, var, evaluate=False)[var]
      code_lines.append(f'  {s_linear.name} = {sympy_analysis.sympy2str(linear)}')
      # linear exponential
      linear_exp = sympy.exp(linear * dt)
      code_lines.append(f'  {s_linear_exp.name} = {sympy_analysis.sympy2str(linear_exp)}')
      # df part
      df_part = (s_linear_exp - 1) / s_linear * s_df
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(df_part)}')
    else:
      # linear exponential
      code_lines.append(f'  {s_linear_exp.name} = sqrt({dt})')
      # df part
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(dt * s_df)}')

    # update expression
    update = var + s_df_part

    # The actual update step
    code_lines.append(f'  {diff_eq.var_name}_new = {sympy_analysis.sympy2str(update)}')
    code_lines.append('')

  code_lines.append(f'  return {", ".join([f"{v}_new" for v in variables])}')

  return Tools.compile_and_assign_attrs(code_lines=code_lines,
                                        code_scope=code_scope,
                                        show_code=show_code,
                                        func_name=func_name,
                                        variables=variables,
                                        parameters=parameters,
                                        dt=dt,
                                        var_type=var_type)
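# Usage sketch for `exp_euler_wrapper` (hedged: the derivative function and
# the `constants.SCALAR_VAR` flag are illustrative assumptions; only
# SYSTEM_VAR is ruled out by the check above).
def deriv(V, t, I_ext):
  dV = (-V + I_ext) / 10.
  return dV

integ = exp_euler_wrapper(deriv, show_code=True, dt=0.01,
                          var_type=constants.SCALAR_VAR,  # assumption
                          im_return=())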
def jit(obj_or_fun, nopython=True, fastmath=True, parallel=False, nogil=False,
        forceobj=False, looplift=True, error_model='python', inline='never',
        boundscheck=None, show_code=False, debug=False):
  """Just-In-Time (JIT) compilation in the NumPy backend.

  JIT compilation in the NumPy backend relies on
  `Numba <http://numba.pydata.org/>`_. However, in BrainPy,
  ``brainpy.math.numpy.jit()`` can also apply to class objects,
  especially instances of :py:class:`brainpy.DynamicalSystem`.

  If you are using the JAX backend, please refer to the JIT compilation
  in the JAX backend: ``brainpy.math.jit()``.

  Parameters
  ----------
  obj_or_fun : callable, Base
    The function or the base model to JIT compile.
  nopython : bool
    Set to True to disable the use of PyObjects and Python API calls.
    Default value is True.
  fastmath : bool
    In certain classes of applications strict IEEE 754 compliance is less
    important. As a result, it is possible to relax some numerical rigour
    with a view to gaining additional performance. The way to achieve this
    behaviour in Numba is through the use of the ``fastmath`` keyword
    argument.
  parallel : bool
    Enables automatic parallelization (and related optimizations) for those
    operations in the function known to have parallel semantics.
  nogil : bool
    Whenever Numba optimizes Python code to native code that only works on
    native types and variables (rather than Python objects), it is no longer
    necessary to hold Python's global interpreter lock (GIL). Numba will
    release the GIL when entering such a compiled function if you passed
    ``nogil=True``.
  forceobj : bool
    Set to True to force the use of PyObjects for every value.
    Default value is False.
  looplift : bool
    Set to True to enable jitting loops in nopython mode while leaving
    surrounding code in object mode. This allows functions to allocate
    NumPy arrays and use Python objects, while the tight loops in the
    function can still be compiled in nopython mode. Any arrays that the
    tight loop uses should be created before the loop is entered.
    Default value is True.
  error_model : str
    The error model affects divide-by-zero behavior. Valid values are
    'python' and 'numpy'. The 'python' model raises an exception. The
    'numpy' model sets the result to *+/-inf* or *nan*.
    Default value is 'python'.
  inline : str or callable
    The inline option determines whether a function is inlined into its
    caller when called. String options are 'never' (default), which will
    never inline, and 'always', which will always inline. If a callable is
    provided, it will be called with the call expression node that is
    requesting inlining, the caller's IR, and the callee's IR as arguments;
    it is expected to return truthy to indicate whether to inline.
    NOTE: This inlining is performed at the Numba IR level and is in no way
    related to LLVM inlining.
  boundscheck : bool or None
    Set to True to enable bounds checking for array indices. Out-of-bounds
    accesses will raise an IndexError. The default is to not do bounds
    checking. If False, bounds checking is disabled; out-of-bounds accesses
    can produce garbage results or segfaults. However, enabling bounds
    checking will slow down typical functions, so it is recommended to only
    use this flag for debugging. You can also set the NUMBA_BOUNDSCHECK
    environment variable to 0 or 1 to globally override this flag. The
    default value is None, which under normal execution equates to False,
    but if ``debug`` is set to True then bounds checking will be enabled.
  show_code : bool
    Whether to show the compiled code. For debugging.
  debug : bool
    Whether to compile in debug mode.
  """
  # checking
  if numba is None:
    raise errors.PackageMissingError('JIT compilation in the numpy backend needs Numba. '
                                     'Please install numba via: \n'
                                     '>>> pip install numba\n'
                                     '>>> # or \n'
                                     '>>> conda install numba')

  return _jit(obj_or_fun,
              show_code=show_code,
              nopython=nopython,
              fastmath=fastmath,
              parallel=parallel,
              nogil=nogil,
              forceobj=forceobj,
              looplift=looplift,
              error_model=error_model,
              inline=inline,
              boundscheck=boundscheck,
              debug=debug)
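# Usage sketch for the NumPy-backend `jit` (hedged: a minimal example; the
# exact import path of this function in your BrainPy version may differ):
import numpy as np

@jit
def dist(x, y):
  return np.sqrt(np.sum((x - y) ** 2))

dist(np.ones(100), np.zeros(100))  # first call triggers Numba compilation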
def _numba_backend():
  r = backend.get_backend().startswith('numba')
  if r and nb is None:
    raise errors.PackageMissingError('Please install numba for the numba backend.')
  return r
# -*- coding: utf-8 -*-

import ast
import math
from collections import Counter

import numpy as np

from brainpy import errors
from brainpy import tools

try:
  import sympy
except ModuleNotFoundError:
  raise errors.PackageMissingError('Package "sympy" must be installed when the '
                                   'users want to utilize the sympy analysis.')

import sympy.functions.elementary.complexes
import sympy.functions.elementary.exponential
import sympy.functions.elementary.hyperbolic
import sympy.functions.elementary.integers
import sympy.functions.elementary.miscellaneous
import sympy.functions.elementary.trigonometric
from sympy.codegen import cfunctions
from sympy.printing.precedence import precedence
from sympy.printing.str import StrPrinter

CONSTANT_NOISE = 'CONSTANT'
FUNCTIONAL_NOISE = 'FUNCTIONAL'

FUNCTION_MAPPING = {
  # functions inherited from Python
def scipy_minimize_with_jax(fun, x0,
                            method=None,
                            args=(),
                            bounds=None,
                            constraints=(),
                            tol=None,
                            callback=None,
                            options=None):
  """A simple wrapper around ``scipy.optimize.minimize`` using JAX.

  Parameters
  ----------
  fun : function
    The objective function to be minimized, written in JAX code so that it
    is automatically differentiable. It is of type ``fun: (x, *args) -> float``,
    where ``x`` is a PyTree and ``args`` is a tuple of the fixed parameters
    needed to completely specify the function.
  x0 : jnp.ndarray
    Initial guess represented as a JAX PyTree.
  args : tuple, optional
    Extra arguments passed to the objective function and its derivative.
    Must consist of valid JAX types; e.g. the leaves of the PyTree must
    be floats.
  method : str or callable, optional
    Type of solver. Should be one of:

    - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
    - 'Powell' :ref:`(see here) <optimize.minimize-powell>`
    - 'CG' :ref:`(see here) <optimize.minimize-cg>`
    - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
    - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
    - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
    - 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
    - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
    - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
    - 'trust-constr' :ref:`(see here) <optimize.minimize-trustconstr>`
    - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
    - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
    - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
    - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
    - custom - a callable object (added in version 0.14.0); see below
      for description.

    If not given, it is chosen to be one of ``BFGS``, ``L-BFGS-B``, or
    ``SLSQP``, depending on whether the problem has constraints or bounds.
  bounds : sequence or `Bounds`, optional
    Bounds on variables for the L-BFGS-B, TNC, SLSQP, Powell, and
    trust-constr methods. There are two ways to specify the bounds:

    1. Instance of the `Bounds` class.
    2. Sequence of ``(min, max)`` pairs for each element in `x`. None is
       used to specify no bound.

    Note that in order to use `bounds` you will need to manually flatten
    them in the same order as your inputs `x0`.
  constraints : {Constraint, dict} or List of {Constraint, dict}, optional
    Constraints definition (only for COBYLA, SLSQP, and trust-constr).

    Constraints for 'trust-constr' are defined as a single object or a
    list of objects specifying constraints to the optimization problem.
    Available constraints are:

    - `LinearConstraint`
    - `NonlinearConstraint`

    Constraints for COBYLA and SLSQP are defined as a list of
    dictionaries, each with the fields:

    type : str
      Constraint type: 'eq' for equality, 'ineq' for inequality.
    fun : callable
      The function defining the constraint.
    jac : callable, optional
      The Jacobian of `fun` (only for SLSQP).
    args : sequence, optional
      Extra arguments to be passed to the function and Jacobian.

    Equality constraint means that the constraint function result is to be
    zero, whereas inequality means that it is to be non-negative. Note
    that COBYLA only supports inequality constraints.

    Note that in order to use `constraints` you will need to manually
    flatten them in the same order as your inputs `x0`.
  tol : float, optional
    Tolerance for termination. For detailed control, use solver-specific
    options.
  options : dict, optional
    A dictionary of solver options. All methods accept the following
    generic options:

    maxiter : int
      Maximum number of iterations to perform. Depending on the method,
      each iteration may use several function evaluations.
    disp : bool
      Set to True to print convergence messages.

    For method-specific options, see :func:`show_options()`.
  callback : callable, optional
    Called after each iteration. For 'trust-constr' it is a callable with
    the signature ``callback(xk, state) -> bool``, where ``xk`` is the
    current parameter vector represented as a PyTree, and ``state`` is an
    `OptimizeResult` object with the same fields as the ones from the
    return. If the callback returns True, the algorithm execution is
    terminated. For all the other methods, the signature is
    ``callback(xk)``, where ``xk`` is the current parameter vector,
    represented as a PyTree.

  Returns
  -------
  res : OptimizeResult
    The optimization result represented as an ``OptimizeResult`` object.
    Important attributes are: ``x``, the solution array, represented as a
    JAX PyTree; ``success``, a Boolean flag indicating whether the
    optimizer exited successfully; and ``message``, which describes the
    cause of the termination. See `scipy.optimize.OptimizeResult` for a
    description of other attributes.
  """
  if soptimize is None:
    raise errors.PackageMissingError(f'"scipy" must be installed when the user wants to use '
                                     f'the function: {scipy_minimize_with_jax}')

  # Use tree flatten and unflatten to convert params x0 from PyTrees to flat arrays
  x0_flat, unravel = ravel_pytree(x0)

  # Wrap the objective function to consume flat _original_
  # numpy arrays and produce scalar outputs.
  def fun_wrapper(x_flat, *args):
    x = unravel(x_flat)
    r = fun(x, *args)
    r = r.value if isinstance(r, bm.JaxArray) else r
    return float(r)

  # Wrap the gradient in a similar manner
  jac = jit(grad(fun))

  def jac_wrapper(x_flat, *args):
    x = unravel(x_flat)
    g_flat, _ = ravel_pytree(jac(x, *args))
    return np.array(g_flat)

  # Wrap the callback to consume a PyTree
  def callback_wrapper(x_flat, *args):
    if callback is not None:
      x = unravel(x_flat)
      return callback(x, *args)

  # Minimize with scipy
  results = soptimize.minimize(fun_wrapper,
                               x0_flat,
                               args=args,
                               method=method,
                               jac=jac_wrapper,
                               callback=callback_wrapper,
                               bounds=bounds,
                               constraints=constraints,
                               tol=tol,
                               options=options)

  # Pack the output back into a PyTree
  results["x"] = unravel(results["x"])
  return results
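# Usage sketch: minimize a quadratic over a PyTree of parameters (a minimal
# example, assuming JAX and SciPy are installed; names are illustrative).
import jax.numpy as jnp

def loss(params):
  return jnp.sum(params['w'] ** 2) + (params['b'] - 1.0) ** 2

x0 = {'w': jnp.ones(3), 'b': jnp.array(0.0)}
res = scipy_minimize_with_jax(loss, x0, method='BFGS')
print(res['x'])  # the solution, unraveled back into the same PyTree structure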
def exp_euler(f, g, dt, sde_type, var_type, wiener_type, show_code):
  try:
    import sympy
    from brainpy.integrators import sympy_analysis
  except ModuleNotFoundError:
    raise errors.PackageMissingError(
      'SymPy must be installed when using exponential Euler methods.')

  if var_type == constants.SYSTEM_VAR:
    raise errors.IntegratorError(
      f'Exponential Euler method does not support {var_type} variable type.')
  if sde_type != constants.ITO_SDE:
    raise errors.IntegratorError(
      f'Exponential Euler method only supports the Ito integral, but we got {sde_type}.')

  vdt, variables, parameters, arguments, func_name = common.basic_info(f=f, g=g)

  # 1. code scope
  closure_vars = inspect.getclosurevars(f)
  code_scope = dict(closure_vars.nonlocals)
  code_scope.update(dict(closure_vars.globals))
  code_scope['f'] = f
  code_scope['g'] = g
  code_scope[vdt] = dt
  code_scope[f'{vdt}_sqrt'] = dt ** 0.5
  code_scope['ops'] = ops
  code_scope['exp'] = ops.exp

  # 2. code lines
  code_lines = [f'def {func_name}({", ".join(arguments)}):']

  # 2.1 dg
  # dg = g(x, t, *args)
  all_dg = [f'{var}_dg' for var in variables]
  code_lines.append(f'  {", ".join(all_dg)} = g({", ".join(variables + parameters)})')
  code_lines.append('  ')

  # 2.2 dW
  Tools.noise_terms(code_lines, variables)

  # 2.3 dgdW
  # ----
  # SCALAR_WIENER : dg * dW
  # VECTOR_WIENER : ops.sum(dg * dW, axis=-1)
  if wiener_type == constants.SCALAR_WIENER:
    for var in variables:
      code_lines.append(f'  {var}_dgdW = {var}_dg * {var}_dW')
  else:
    for var in variables:
      code_lines.append(f'  {var}_dgdW = ops.sum({var}_dg * {var}_dW, axis=-1)')
  code_lines.append('  ')

  # 2.4 new var
  # ----
  analysis = separate_variables(f)
  variables_for_returns = analysis['variables_for_returns']
  expressions_for_returns = analysis['expressions_for_returns']
  for vi, (key, vars) in enumerate(variables_for_returns.items()):
    # separate variables
    sd_variables = []
    for v in vars:
      if len(v) > 1:
        raise ValueError('Cannot analyze multi-assignment code line.')
      sd_variables.append(v[0])
    expressions = expressions_for_returns[key]
    var_name = variables[vi]
    diff_eq = sympy_analysis.SingleDiffEq(var_name=var_name,
                                          variables=sd_variables,
                                          expressions=expressions,
                                          derivative_expr=key,
                                          scope=code_scope,
                                          func_name=func_name)

    f_expressions = diff_eq.get_f_expressions(substitute_vars=diff_eq.var_name)

    # code lines
    code_lines.extend([f"  {str(expr)}" for expr in f_expressions[:-1]])

    # get the linear system using sympy
    f_res = f_expressions[-1]
    df_expr = sympy_analysis.str2sympy(f_res.code).expr.expand()
    s_df = sympy.Symbol(f"{f_res.var_name}")
    code_lines.append(f'  {s_df.name} = {sympy_analysis.sympy2str(df_expr)}')
    var = sympy.Symbol(diff_eq.var_name, real=True)

    # get df part
    s_linear = sympy.Symbol(f'_{diff_eq.var_name}_linear')
    s_linear_exp = sympy.Symbol(f'_{diff_eq.var_name}_linear_exp')
    s_df_part = sympy.Symbol(f'_{diff_eq.var_name}_df_part')
    if df_expr.has(var):
      # linear part
      linear = sympy.collect(df_expr, var, evaluate=False)[var]
      code_lines.append(f'  {s_linear.name} = {sympy_analysis.sympy2str(linear)}')
      # linear exponential
      linear_exp = sympy.exp(linear * dt)
      code_lines.append(f'  {s_linear_exp.name} = {sympy_analysis.sympy2str(linear_exp)}')
      # df part
      df_part = (s_linear_exp - 1) / s_linear * s_df
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(df_part)}')
    else:
      # linear exponential
      code_lines.append(f'  {s_linear_exp.name} = sqrt({dt})')
      # df part
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(dt * s_df)}')

    # update expression
    update = var + s_df_part

    # The actual update step
    code_lines.append(f'  {diff_eq.var_name}_new = {sympy_analysis.sympy2str(update)} + {var_name}_dgdW')
    code_lines.append('')

  # returns
  new_vars = [f'{var}_new' for var in variables]
  code_lines.append(f'  return {", ".join(new_vars)}')

  # return and compile
  return common.compile_and_assign_attrs(code_lines=code_lines,
                                         code_scope=code_scope,
                                         show_code=show_code,
                                         variables=variables,
                                         parameters=parameters,
                                         func_name=func_name,
                                         sde_type=sde_type,
                                         var_type=var_type,
                                         wiener_type=wiener_type,
                                         dt=dt)
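# Usage sketch for the SDE `exp_euler` (hedged: the drift/diffusion pair is
# illustrative, and `constants.SCALAR_VAR` is an assumed variable-type flag;
# only SYSTEM_VAR is ruled out by the checks above):
def f(V, t):
  return -V / 10.  # drift term

def g(V, t):
  return 0.5       # constant diffusion, scalar Wiener process

step = exp_euler(f, g, dt=0.01,
                 sde_type=constants.ITO_SDE,
                 var_type=constants.SCALAR_VAR,  # assumption
                 wiener_type=constants.SCALAR_WIENER,
                 show_code=True)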
def build(self):
  if analysis_by_sympy is None or sympy is None:
    raise errors.PackageMissingError(
      f'Package "sympy" must be installed when the users '
      f'want to utilize {ExponentialEuler.__name__}. ')

  # check bound method
  if hasattr(self.f, '__self__'):
    self.code_lines = [f'def {self.func_name}({", ".join(["self"] + list(self.arguments))}):']

  # code scope
  closure_vars = inspect.getclosurevars(self.f)
  self.code_scope.update(closure_vars.nonlocals)
  self.code_scope.update(dict(closure_vars.globals))
  self.code_scope['math'] = math

  analysis = separate_variables(self.f)
  variables_for_returns = analysis['variables_for_returns']
  expressions_for_returns = analysis['expressions_for_returns']
  for vi, (key, all_var) in enumerate(variables_for_returns.items()):
    # separate variables
    sd_variables = []
    for v in all_var:
      if len(v) > 1:
        raise ValueError(f'Cannot analyze multi-assignment code line: {v}.')
      sd_variables.append(v[0])
    expressions = expressions_for_returns[key]
    var_name = self.variables[vi]
    diff_eq = analysis_by_sympy.SingleDiffEq(var_name=var_name,
                                             variables=sd_variables,
                                             expressions=expressions,
                                             derivative_expr=key,
                                             scope=self.code_scope,
                                             func_name=self.func_name)
    var = sympy.Symbol(diff_eq.var_name, real=True)

    try:
      s_df_part = tools.timeout(self.timeout)(self.solve)(diff_eq, var)
    except KeyboardInterrupt:
      raise errors.DiffEqError(
        f'{self.__class__} failed to solve {self.f}, because the '
        f'symbolic differentiation by SymPy timed out (limit: {self.timeout} s). '
        f'Instead, you can use {ExpEulerAuto} for Exponential Euler '
        f'integration, because it is capable of performing automatic '
        f'differentiation.')

    # update expression
    update = var + s_df_part

    # The actual update step
    self.code_lines.append(f'  {diff_eq.var_name}_new = {analysis_by_sympy.sympy2str(update)}')
    self.code_lines.append('')

  self.code_lines.append(f'  return {", ".join([f"{v}_new" for v in self.variables])}')

  self.integral = utils.compile_code(
    code_scope={k: v for k, v in self.code_scope.items()},
    code_lines=self.code_lines,
    show_code=self.show_code,
    func_name=self.func_name)

  if hasattr(self.f, '__self__'):
    host = self.f.__self__
    self.integral = self.integral.__get__(host, host.__class__)