def __init__(self, f, var_type=None, dt=None, name=None, show_code=False):
  super(ODEIntegrator, self).__init__(name=name)

  # others
  self.dt = math.get_dt() if dt is None else dt
  assert isinstance(self.dt, (int, float)), f'"dt" must be an int or float, but got {self.dt}'
  self.show_code = show_code

  # derivative function
  self.derivative = {constants.F: f}
  self.f = f

  # integration function
  self.integral = None

  # parse function arguments
  variables, parameters, arguments = utils.get_args(f)
  self.variables = variables  # variable names (before 't')
  self.parameters = parameters  # parameter names ('t' and after)
  self.arguments = list(arguments) + [f'{constants.DT}={self.dt}']  # function arguments
  self.var_type = var_type  # variable type

  # code scope
  self.code_scope = {constants.F: f}

  # code lines
  self.func_name = f_names(f)
  self.code_lines = [f'def {self.func_name}({", ".join(self.arguments)}):']
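# Illustrative note (not library code): for a derivative function such as
#   def dv(v, t, tau): return -v / tau
# `utils.get_args` is expected to split the signature around 't', giving
# variables=['v'], parameters=['t', 'tau'], and the argument list used for
# the generated header, so `self.code_lines[0]` becomes something like
# 'def odeint_dv(v, t, tau, dt=0.1):'. The exact generated name depends on
# `f_names`, so treat these names as assumptions.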
def wrapper_of_rk2(f, show_code, dt, beta):
  class_kw, variables, parameters, arguments = utils.get_args(f)
  func_name = _f_names(f)
  code_scope = {'f': f,
                'dt': dt,
                'beta': beta,
                'k1': 1 - 1 / (2 * beta),
                'k2': 1 / (2 * beta)}
  code_lines = [f'def {func_name}({", ".join(arguments)}):']

  # k1
  k1_args = variables + parameters
  k1_vars_d = [f'd{v}_k1' for v in variables]
  code_lines.append(f'  {", ".join(k1_vars_d)} = f({", ".join(k1_args)})')

  # k2
  k2_args = [f'{v} + d{v}_k1 * dt * beta' for v in variables]
  k2_args.append('t + dt * beta')
  k2_args.extend(parameters[1:])
  k2_vars_d = [f'd{v}_k2' for v in variables]
  code_lines.append(f'  {", ".join(k2_vars_d)} = f({", ".join(k2_args)})')

  # returns
  for v, k1, k2 in zip(variables, k1_vars_d, k2_vars_d):
    code_lines.append(f'  {v}_new = {v} + ({k1} * k1 + {k2} * k2) * dt')
  return_vars = [f'{v}_new' for v in variables]
  code_lines.append(f'  return {", ".join(return_vars)}')

  return _compile_and_assign_attrs(
      code_lines=code_lines, code_scope=code_scope, show_code=show_code,
      func_name=func_name, variables=variables, parameters=parameters, dt=dt)
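# A minimal, self-contained sketch (illustrative, not the compiled output) of
# the one-variable function this wrapper generates. `beta` is the free RK2
# parameter; beta = 2/3 gives Ralston's method, and the two weights below are
# the 'k1'/'k2' entries placed in `code_scope` above.
def _rk2_sketch(f, v, t, dt=0.1, beta=2 / 3):
  k1_coef = 1 - 1 / (2 * beta)  # weight of the first stage
  k2_coef = 1 / (2 * beta)      # weight of the second stage
  dv_k1 = f(v, t)
  dv_k2 = f(v + dv_k1 * dt * beta, t + dt * beta)
  return v + (dv_k1 * k1_coef + dv_k2 * k2_coef) * dt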
def basic_info(f, g):
  vdt = 'dt'
  if f.__name__.isidentifier():
    func_name = f.__name__
  elif g.__name__.isidentifier():
    func_name = g.__name__
  else:
    # neither name is usable (e.g. both are lambdas), so draw from a counter
    global _SDE_UNKNOWN_NO
    func_name = f'unknown_sde{_SDE_UNKNOWN_NO}'
    _SDE_UNKNOWN_NO += 1
  func_new_name = constants.SDE_INT + func_name
  variables, parameters, arguments = utils.get_args(f)
  return vdt, variables, parameters, arguments, func_new_name
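# Illustrative example of the returned names: given
#   def lorenz_f(x, y, z, t, sigma): ...
# f.__name__ ('lorenz_f') is a valid identifier, so func_new_name becomes
# constants.SDE_INT + 'lorenz_f'. The `unknown_sde` fallback above is only
# reached when both f and g are anonymous, i.e. when both names are
# '<lambda>', which is not a valid identifier.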
def _build_integrator(self, eq):
  if isinstance(eq, joint_eq.JointEq):
    results = []
    for sub_eq in eq.eqs:
      results.extend(self._build_integrator(sub_eq))
    return results

  else:
    vars, pars, _ = utils.get_args(eq)

    # checking
    if len(vars) != 1:
      raise errors.DiffEqError(f'{self.__class__} only supports numerical integration '
                               f'for one variable at a time, while we got {vars} in {eq}. '
                               f'Please split your multiple variables into multiple '
                               f'derivative functions.')

    # gradient function
    value_and_grad = math.vector_grad(eq, argnums=0, dyn_vars=self.dyn_var,
                                      return_value=True)

    # integration function
    def integral(*args, **kwargs):
      assert len(args) > 0
      dt = kwargs.pop('dt', math.get_dt())
      linear, derivative = value_and_grad(*args, **kwargs)
      phi = math.where(linear == 0.,
                       math.ones_like(linear),
                       (math.exp(dt * linear) - 1) / (dt * linear))
      return args[0] + dt * phi * derivative

    return [(integral, vars, pars), ]
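# The step above is the exponential Euler update
#   x_{n+1} = x_n + dt * phi(dt * L) * f(x_n),   phi(z) = (e^z - 1) / z,
# where L = df/dx comes from automatic differentiation and phi(0) = 1 covers
# the non-stiff limit. A minimal NumPy sketch (illustrative; it assumes a
# scalar ODE and a hand-written derivative instead of `math.vector_grad`):
import numpy as np

def _exp_euler_sketch(f, df_dx, x, t, dt):
  linear = df_dx(x, t)  # L = df/dx at the current state
  # as in the original, both branches are evaluated; `where` masks the L == 0 case
  phi = np.where(linear == 0.,
                 np.ones_like(linear),
                 (np.exp(dt * linear) - 1) / (dt * linear))
  return x + dt * phi * f(x, t)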
def __init__(self, f, g, dt=None, name=None, show_code=False,
             var_type=None, intg_type=None, wiener_type=None):
  super(SDEIntegrator, self).__init__(name=name)

  # derivative functions
  self.derivative = {constants.F: f, constants.G: g}
  self.f = f
  self.g = g

  # integration function
  self.integral = None

  # essential parameters
  self.dt = math.get_dt() if dt is None else dt
  assert isinstance(self.dt, (int, float)), f'"dt" must be an int or float, but got {self.dt}'
  intg_type = constants.ITO_SDE if intg_type is None else intg_type
  var_type = constants.SCALAR_VAR if var_type is None else var_type
  wiener_type = constants.SCALAR_WIENER if wiener_type is None else wiener_type
  if intg_type not in constants.SUPPORTED_INTG_TYPE:
    raise errors.IntegratorError(f'Currently, BrainPy only supports SDE_INT types: '
                                 f'{constants.SUPPORTED_INTG_TYPE}. But we got {intg_type}.')
  if var_type not in constants.SUPPORTED_VAR_TYPE:
    raise errors.IntegratorError(f'Currently, BrainPy only supports variable types: '
                                 f'{constants.SUPPORTED_VAR_TYPE}. But we got {var_type}.')
  if wiener_type not in constants.SUPPORTED_WIENER_TYPE:
    raise errors.IntegratorError(f'Currently, BrainPy only supports Wiener '
                                 f'process types: {constants.SUPPORTED_WIENER_TYPE}. '
                                 f'But we got {wiener_type}.')
  self.var_type = var_type  # variable type
  self.intg_type = intg_type  # integral type
  self.wiener_type = wiener_type  # Wiener process type

  # parse function arguments
  variables, parameters, arguments = utils.get_args(f)
  self.variables = variables  # variable names (before 't')
  self.parameters = parameters  # parameter names ('t' and after)
  self.arguments = list(arguments) + [f'{constants.DT}={self.dt}']  # function arguments

  # random seed
  self.rng = math.random.RandomState()

  # code scope
  self.code_scope = {constants.F: f, constants.G: g, 'math': math, 'random': self.rng}

  # code lines
  self.func_name = f_names(f)
  self.code_lines = [f'def {self.func_name}({", ".join(self.arguments)}):']

  # others
  self.show_code = show_code
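# Illustrative usage (assumed names): for a scalar Ito SDE
#   dx = f(x, t, a) * dt + g(x, t, a) * dW
# one would pass, e.g.,
#   def f(x, t, a): return -a * x
#   def g(x, t, a): return 0.1
# and rely on the defaults chosen above: intg_type=constants.ITO_SDE,
# var_type=constants.SCALAR_VAR, wiener_type=constants.SCALAR_WIENER.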
def adaptive_rk_wrapper(f, dt, A, B1, B2, C, tol, adaptive, show_code, var_type):
  """Adaptive Runge-Kutta numerical method for ordinary differential equations.

  The embedded methods are designed to produce an estimate of the local
  truncation error of a single Runge-Kutta step and, as a result, allow the
  error to be controlled with an adaptive step size. This is done by having
  two methods in the tableau, one with order :math:`p` and one with order
  :math:`p-1`.

  The lower-order step is given by

  .. math::

      y^*_{n+1} = y_n + h\\sum_{i=1}^s b^*_i k_i,

  where the :math:`k_{i}` are the same as for the higher-order method. Then
  the error is

  .. math::

      e_{n+1} = y_{n+1} - y^*_{n+1} = h\\sum_{i=1}^s (b_i - b^*_i) k_i,

  which is :math:`O(h^{p})`. The Butcher tableau for this kind of method is
  extended to give the values of :math:`b_{i}^{*}`:

  .. math::

      \\begin{array}{c|cccc}
          c_1 & a_{11} & a_{12} & \\dots & a_{1s} \\\\
          c_2 & a_{21} & a_{22} & \\dots & a_{2s} \\\\
          \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\
          c_s & a_{s1} & a_{s2} & \\dots & a_{ss} \\\\
          \\hline
          & b_1 & b_2 & \\dots & b_s \\\\
          & b_1^* & b_2^* & \\dots & b_s^* \\\\
      \\end{array}

  Parameters
  ----------
  f : callable
      The derivative function.
  show_code : bool
      Whether to show the formatted code.
  dt : float
      The numerical precision.
  A : tuple, list
      The A matrix in the Butcher tableau.
  B1 : tuple, list
      The B1 vector in the Butcher tableau.
  B2 : tuple, list
      The B2 vector in the Butcher tableau.
  C : tuple, list
      The C vector in the Butcher tableau.
  adaptive : bool
      Whether to adapt the step size using the embedded error estimate.
  tol : float
      The error tolerance for the adaptive step-size control.
  var_type : str
      The variable type, one of ``constants.SUPPORTED_VAR_TYPE``.

  Returns
  -------
  integral_func : callable
      The one-step numerical integration function.
  """
  assert var_type in constants.SUPPORTED_VAR_TYPE, \
      f'"var_type" only supports {constants.SUPPORTED_VAR_TYPE}, not {var_type}.'

  class_kw, variables, parameters, arguments = utils.get_args(f)
  dt_var = 'dt'
  func_name = _f_names(f)

  if adaptive:
    # code scope; dt becomes a runtime argument instead of a constant
    code_scope = {'f': f, 'tol': tol}
    arguments = list(arguments) + ['dt']
  else:
    # code scope
    code_scope = {'f': f, 'dt': dt}

  # code lines
  code_lines = [f'def {func_name}({", ".join(arguments)}):']
  # stage steps
  _step(variables, dt_var, A, C, code_lines, parameters)
  # variable update
  return_args = _update(variables, dt_var, B1, code_lines)

  # error adaptive item
  if adaptive:
    error_terms = []
    for v in variables:
      result = []
      for i, (b1, b2) in enumerate(zip(B1, B2)):
        if isinstance(b1, str):
          b1 = eval(b1)
        if isinstance(b2, str):
          b2 = eval(b2)
        diff = b1 - b2
        if diff != 0.:
          result.append(f'd{v}_k{i + 1} * {dt_var} * {diff}')
      if len(result) > 0:
        if var_type == constants.SCALAR_VAR:
          code_lines.append(f'  {v}_te = abs({" + ".join(result)})')
        else:
          code_lines.append(f'  {v}_te = sum(abs({" + ".join(result)}))')
        error_terms.append(f'{v}_te')
    if len(error_terms) > 0:
      code_lines.append(f'  error = {" + ".join(error_terms)}')
      code_lines.append(f'  if error > tol:')
      code_lines.append(f'    {dt_var}_new = 0.9 * {dt_var} * (tol / error) ** 0.2')
      code_lines.append(f'  else:')
      code_lines.append(f'    {dt_var}_new = {dt_var}')
      return_args.append(f'{dt_var}_new')

  # returns
  code_lines.append(f'  return {", ".join(return_args)}')
  # compilation
  return _compile_and_assign_attrs(
      code_lines=code_lines, code_scope=code_scope, show_code=show_code,
      func_name=func_name, variables=variables, parameters=parameters, dt=dt)
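# Self-contained sketch (illustrative, not the generated source) of the
# step-size control emitted above: the embedded lower-order solution yields an
# error estimate, and dt is rescaled by the classic safety factor
# 0.9 * (tol / error) ** (1/5) whenever the estimate exceeds the tolerance.
def _adaptive_dt_sketch(error, dt, tol):
  if error > tol:
    # shrink dt; the 1/5 exponent matches a 4(5) embedded pair
    return 0.9 * dt * (tol / error) ** 0.2
  return dt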
def rk_wrapper(f, show_code, dt, A, B, C):
  """Runge-Kutta methods for ordinary differential equations.

  For the system

  .. math::

      \\frac{d y}{d t}=f(t, y),

  explicit Runge-Kutta methods take the form

  .. math::

      k_{i}=f\\left(t_{n}+c_{i}h, y_{n}+h\\sum_{j=1}^{s}a_{ij}k_{j}\\right) \\\\
      y_{n+1}=y_{n}+h \\sum_{i=1}^{s} b_{i} k_{i}

  Each method is defined by its Butcher tableau, which puts the coefficients
  of the method in a table as follows:

  .. math::

      \\begin{array}{c|cccc}
          c_{1} & a_{11} & a_{12} & \\ldots & a_{1 s} \\\\
          c_{2} & a_{21} & a_{22} & \\ldots & a_{2 s} \\\\
          \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\
          c_{s} & a_{s 1} & a_{s 2} & \\ldots & a_{s s} \\\\
          \\hline
          & b_{1} & b_{2} & \\ldots & b_{s}
      \\end{array}

  Parameters
  ----------
  f : callable
      The derivative function.
  show_code : bool
      Whether to show the formatted code.
  dt : float
      The numerical precision.
  A : tuple, list
      The A matrix in the Butcher tableau.
  B : tuple, list
      The B vector in the Butcher tableau.
  C : tuple, list
      The C vector in the Butcher tableau.

  Returns
  -------
  integral_func : callable
      The one-step numerical integration function.
  """
  class_kw, variables, parameters, arguments = utils.get_args(f)
  dt_var = 'dt'
  func_name = _f_names(f)

  # code scope
  code_scope = {'f': f, 'dt': dt}
  # code lines
  code_lines = [f'def {func_name}({", ".join(arguments)}):']
  # step stage
  _step(variables, dt_var, A, C, code_lines, parameters)
  # variable update
  return_args = _update(variables, dt_var, B, code_lines)
  # returns
  code_lines.append(f'  return {", ".join(return_args)}')
  # compilation
  return _compile_and_assign_attrs(
      code_lines=code_lines, code_scope=code_scope, show_code=show_code,
      func_name=func_name, variables=variables, parameters=parameters, dt=dt)
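# Minimal runnable sketch (illustrative) of the kind of function `_step` and
# `_update` generate, written by hand for the classical RK4 tableau and a
# single variable `v`:
def _rk4_sketch(f, v, t, dt=0.1):
  dv_k1 = f(v, t)
  dv_k2 = f(v + dv_k1 * dt * 0.5, t + dt * 0.5)
  dv_k3 = f(v + dv_k2 * dt * 0.5, t + dt * 0.5)
  dv_k4 = f(v + dv_k3 * dt, t + dt)
  return v + (dv_k1 + 2 * dv_k2 + 2 * dv_k3 + dv_k4) / 6 * dt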
def exp_euler_wrapper(f, show_code, dt, var_type, im_return):
  try:
    import sympy
    from brainpy.integrators import sympy_analysis
  except ModuleNotFoundError:
    raise errors.PackageMissingError('SymPy must be installed when using '
                                     'exponential Euler methods.')

  if var_type == constants.SYSTEM_VAR:
    raise errors.IntegratorError(f'The exponential Euler method does not '
                                 f'support the {var_type} variable type.')

  dt_var = 'dt'
  class_kw, variables, parameters, arguments = utils.get_args(f)
  func_name = Tools.f_names(f)

  code_lines = [f'def {func_name}({", ".join(arguments)}):']

  # code scope
  closure_vars = inspect.getclosurevars(f)
  code_scope = dict(closure_vars.nonlocals)
  code_scope.update(dict(closure_vars.globals))
  code_scope[dt_var] = dt
  code_scope['f'] = f
  code_scope['exp'] = ops.exp

  analysis = separate_variables(f)
  variables_for_returns = analysis['variables_for_returns']
  expressions_for_returns = analysis['expressions_for_returns']
  for vi, (key, vars) in enumerate(variables_for_returns.items()):
    # separate variables
    sd_variables = []
    for v in vars:
      if len(v) > 1:
        raise ValueError('Cannot analyze multi-assignment code line.')
      sd_variables.append(v[0])
    expressions = expressions_for_returns[key]
    var_name = variables[vi]
    diff_eq = sympy_analysis.SingleDiffEq(var_name=var_name,
                                          variables=sd_variables,
                                          expressions=expressions,
                                          derivative_expr=key,
                                          scope=code_scope,
                                          func_name=func_name)

    f_expressions = diff_eq.get_f_expressions(substitute_vars=diff_eq.var_name)

    # code lines
    code_lines.extend([f"  {str(expr)}" for expr in f_expressions[:-1]])

    # get the linear system using sympy
    f_res = f_expressions[-1]
    df_expr = sympy_analysis.str2sympy(f_res.code).expr.expand()
    s_df = sympy.Symbol(f"{f_res.var_name}")
    code_lines.append(f'  {s_df.name} = {sympy_analysis.sympy2str(df_expr)}')
    var = sympy.Symbol(diff_eq.var_name, real=True)

    # get df part
    s_linear = sympy.Symbol(f'_{diff_eq.var_name}_linear')
    s_linear_exp = sympy.Symbol(f'_{diff_eq.var_name}_linear_exp')
    s_df_part = sympy.Symbol(f'_{diff_eq.var_name}_df_part')
    if df_expr.has(var):
      # linear part of the derivative
      linear = sympy.collect(df_expr, var, evaluate=False)[var]
      code_lines.append(f'  {s_linear.name} = {sympy_analysis.sympy2str(linear)}')
      # exponential of the linear part
      linear_exp = sympy.exp(linear * dt)
      code_lines.append(f'  {s_linear_exp.name} = {sympy_analysis.sympy2str(linear_exp)}')
      # df part
      df_part = (s_linear_exp - 1) / s_linear * s_df
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(df_part)}')
    else:
      # no linear part: the update below reduces to a plain Euler step
      code_lines.append(f'  {s_linear_exp.name} = sqrt({dt})')
      # df part
      code_lines.append(f'  {s_df_part.name} = {sympy_analysis.sympy2str(dt * s_df)}')

    # update expression
    update = var + s_df_part

    # the actual update step
    code_lines.append(f'  {diff_eq.var_name}_new = {sympy_analysis.sympy2str(update)}')
    code_lines.append('')

  code_lines.append(f'  return {", ".join([f"{v}_new" for v in variables])}')
  return Tools.compile_and_assign_attrs(
      code_lines=code_lines, code_scope=code_scope, show_code=show_code,
      func_name=func_name, variables=variables, parameters=parameters,
      dt=dt, var_type=var_type)
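# Illustrative example (assumed derivative; the exact emitted names depend on
# `sympy_analysis` and are not guaranteed): for
#   def dv(v, t, tau): return (-v + 1.) / tau
# sympy collects the coefficient of v, L = -1/tau, and the generated update is
# conceptually (with dt substituted numerically):
#   _v_linear     = -1/tau
#   _v_linear_exp = exp(dt * -1/tau)
#   _v_df_part    = (_v_linear_exp - 1) / _v_linear * <derivative value>
#   v_new         = v + _v_df_part
# i.e. the closed-form exponential Euler step for the linear part of dv/dt.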