def backward_Euler(f, dt=None, epsilon=1e-12):
    """Backward Euler method. Also named ``implicit_Euler``.

    The implicit equation ``y1 = y0 + dt * f(y1, t + dt)`` is solved by
    fixed-point iteration, starting from a forward-Euler predictor.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.
    epsilon : float
        The convergence tolerance of the fixed-point iteration.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        # forward-Euler predictor, then iterate the implicit equation;
        # the implicit stage is evaluated at t + dt, consistent with
        # the trapezoidal rule below
        y1 = y0 + dt * f(y0, t, *args)
        y2 = y0 + dt * f(y1, t + dt, *args)
        while not np.all(np.abs(y1 - y2) < epsilon):
            y1 = y2
            y2 = y0 + dt * f(y1, t + dt, *args)
        return y2

    return autojit(int_f)
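# A minimal usage sketch (not part of the library): a few implicit steps on the
# linear decay equation dy/dt = -y. The right-hand side ``decay`` is a
# hypothetical test function introduced here for illustration only.
def _demo_backward_Euler():
    def decay(y, t):
        return -y

    step = backward_Euler(decay, dt=0.1)
    y, t = np.array([1.0]), 0.0
    for _ in range(10):
        y, t = step(y, t), t + 0.1
    return y  # approximates exp(-1.0) ~ 0.368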
def trapezoidal_rule(f, dt=None, epsilon=1e-12):
    """Trapezoidal rule.

    The trapezoidal rule is an implicit second-order method, which can be
    considered as both a Runge-Kutta method and a linear multistep method.
    The implicit equation is solved by fixed-point iteration.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.
    epsilon : float
        The convergence tolerance of the fixed-point iteration.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        dy0 = f(y0, t, *args)
        y1 = y0 + dt * dy0
        y2 = y0 + dt / 2 * (dy0 + f(y1, t + dt, *args))
        while not np.all(np.abs(y1 - y2) < epsilon):
            y1 = y2
            y2 = y0 + dt / 2 * (dy0 + f(y1, t + dt, *args))
        return y2

    return autojit(int_f)
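# A minimal usage sketch (illustrative only): one trapezoidal step on the same
# hypothetical decay equation. The result is second-order accurate.
def _demo_trapezoidal_rule():
    step = trapezoidal_rule(lambda y, t: -y, dt=0.1)
    return step(np.array([1.0]), 0.0)  # ~0.90476, vs exp(-0.1) ~ 0.90484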
def rk3(f, dt=None):
    """Kutta's third-order method, commonly known as RK3. Also named ``RK3``.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        k1 = f(y0, t, *args)
        k2 = f(y0 + dt / 2 * k1, t + dt / 2, *args)
        k3 = f(y0 - dt * k1 + 2 * dt * k2, t + dt, *args)
        return y0 + dt / 6 * (k1 + 4 * k2 + k3)

    return autojit(int_f)
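# A minimal usage sketch (illustrative only): one RK3 step on the decay
# equation dy/dt = -y.
def _demo_rk3():
    step = rk3(lambda y, t: -y, dt=0.1)
    return step(np.array([1.0]), 0.0)  # ~0.904833, vs exp(-0.1) ~ 0.904837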
def rk4_alternative(f, dt=None):
    """An alternative fourth-order Runge-Kutta method (the classical 3/8 rule).
    Also named ``RK4_alternative``.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        k1 = f(y0, t, *args)
        k2 = f(y0 + dt / 3 * k1, t + dt / 3, *args)
        k3 = f(y0 - dt / 3 * k1 + dt * k2, t + 2 * dt / 3, *args)
        k4 = f(y0 + dt * k1 - dt * k2 + dt * k3, t + dt, *args)
        return y0 + dt / 8 * (k1 + 3 * k2 + 3 * k3 + k4)

    return autojit(int_f)
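# A minimal usage sketch (illustrative only): one step of the 3/8-rule RK4
# variant; for dy/dt = -y it reproduces exp(-dt) to fourth order.
def _demo_rk4_alternative():
    step = rk4_alternative(lambda y, t: -y, dt=0.1)
    return step(np.array([1.0]), 0.0)  # ~0.9048375, vs exp(-0.1) ~ 0.9048374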
def rk2(f, dt=None, beta=2 / 3):
    """Parametric second-order Runge-Kutta (RK2). Also named ``RK2``.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.
    beta : float
        The free parameter of the schema. Popular choices:

        - 1/2: explicit midpoint method
        - 2/3: Ralston's method (the default)
        - 1: Heun's method, also known as the explicit trapezoid rule

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        k1 = f(y0, t, *args)
        k2 = f(y0 + beta * dt * k1, t + beta * dt, *args)
        return y0 + dt * ((1 - 1 / (2 * beta)) * k1 + 1 / (2 * beta) * k2)

    return autojit(int_f)
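# A minimal usage sketch (illustrative only): three popular members of the RK2
# family, selected through ``beta``. For this linear equation all three give
# the same second-order result, 1 - dt + dt**2 / 2.
def _demo_rk2():
    f = lambda y, t: -y
    y0 = np.array([1.0])
    midpoint = rk2(f, dt=0.1, beta=1 / 2)
    ralston = rk2(f, dt=0.1)  # beta = 2/3, the default
    heun = rk2(f, dt=0.1, beta=1.0)
    return midpoint(y0, 0.0), ralston(y0, 0.0), heun(y0, 0.0)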
def forward_Euler(f, dt=None):
    """Forward Euler method. Also named ``explicit_Euler``.

    The simplest explicit integrator. Its global accuracy is only O(dt)
    and its stability region is small, so it requires a very small time step.

    Parameters
    ----------
    f : callable
        The function on the right-hand side of the differential equation.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    f = autojit(f)
    if dt is None:
        dt = profile.get_dt()

    def int_f(y0, t, *args):
        return y0 + dt * f(y0, t, *args)

    return autojit(int_f)
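# A minimal usage sketch (illustrative only): for dy/dt = -y the forward Euler
# update is y * (1 - dt), which is stable only while |1 - dt| < 1.
def _demo_forward_Euler():
    step = forward_Euler(lambda y, t: -y, dt=0.1)
    return step(np.array([1.0]), 0.0)  # 0.9, a first-order estimate of exp(-0.1)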
def Heun_method(f, g, dt=None):
    """Stratonovich stochastic integral.

    Use the Stratonovich Heun algorithm to integrate a Stratonovich SDE,
    following [2]_ and [3]_.

    Parameters
    ----------
    f : callable
        The drift coefficient, the deterministic part of the SDE.
    g : callable, float
        The diffusion coefficient, the stochastic part.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.

    References
    ----------
    .. [2] H. Gilsing and T. Shardlow, SDELab: A package for solving stochastic
           differential equations in MATLAB, Journal of Computational and
           Applied Mathematics 205 (2007), no. 2, 1002-1018.
    .. [3] P.E. Kloeden, E. Platen, and H. Schurz, Numerical solution of SDE
           through computer experiments, Springer, 1994.
    """
    dt = profile.get_dt() if dt is None else dt
    dt_sqrt = np.sqrt(dt)
    f = autojit(f)

    if callable(g):
        g = autojit(g)

        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t - dt, *args) * dt
            gn = g(y0, t - dt, *args)
            y_bar = y0 + gn * dW * dt_sqrt
            gn_bar = g(y_bar, t, *args)
            dg = 0.5 * (gn + gn_bar) * dW * dt_sqrt
            y1 = y0 + df + dg
            return y1
    else:
        assert isinstance(g, (int, float, np.ndarray))

        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t - dt, *args) * dt
            dg = g * dW * dt_sqrt
            y1 = y0 + df + dg
            return y1

    return autojit(int_fg)
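# A minimal usage sketch (illustrative only): one Stratonovich Heun step of a
# hypothetical SDE dy = -y dt + sigma dW with a constant, callable diffusion.
# Note the convention above: the drift is evaluated at t - dt, so ``t`` is the
# time at the end of the step.
def _demo_Heun_method():
    sigma = 0.2
    step = Heun_method(lambda y, t: -y, lambda y, t: sigma, dt=0.1)
    return step(np.array([1.0]), 0.1)  # stochastic: a different draw each call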
def run_time(self):
    """Get the time points of the network.

    Returns
    -------
    times : numpy.ndarray
        The running time-steps of the network.
    """
    return np.arange(0, self.current_time, profile.get_dt())
def firing_rate(mon, width, window='gaussian'):
    """Calculate the population firing rate of a neuron group.

    This method is adopted from Brian2. The firing rate in trial :math:`k`
    is the spike count :math:`n_{k}^{sp}` in an interval of duration
    :math:`T` divided by :math:`T`:

    .. math::

        v_k = {n_k^{sp} \\over T}

    Parameters
    ----------
    mon : StateMonitor
        The monitor which records spiking activities.
    width : int, float
        The width of the ``window`` in millisecond.
    window : str
        The window to use for smoothing. It can be a string to choose a
        predefined window:

        - `flat`: a rectangular window,
        - `gaussian`: a Gaussian-shaped window.

        For the `gaussian` window, the `width` parameter specifies the
        standard deviation of the Gaussian, and the width of the actual
        window is `4 * width + dt`. For the `flat` window, the width of
        the actual window is `2 * width/2 + dt`.

    Returns
    -------
    rate : numpy.ndarray
        The population rate in Hz, smoothed with the given window.
    """
    # rate
    assert hasattr(mon, 'spike'), \
        'Must record the "spike" of the neuron group to get firing rate.'
    rate = np.sum(mon.spike, axis=1)

    # window
    dt = profile.get_dt()
    if window == 'gaussian':
        width1 = 2 * width / dt
        width2 = int(np.round(width1))
        window = np.exp(-np.arange(-width2, width2 + 1) ** 2 / (width1 ** 2 / 2))
    elif window == 'flat':
        width1 = int(width / 2 / dt) * 2 + 1
        window = np.ones(width1)
    else:
        raise ValueError('Unknown window type "{}".'.format(window))
    window = np.float_(window)
    return np.convolve(rate, window / sum(window), mode='same')
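# A minimal usage sketch (illustrative only): smooth the rate of a fake spike
# record. ``FakeMonitor`` is a hypothetical stand-in for a StateMonitor, and
# ``profile.get_dt()`` is assumed to be configured.
def _demo_firing_rate():
    class FakeMonitor(object):
        # (time steps, neurons), ~2% spike probability per step
        spike = np.random.rand(1000, 10) < 0.02

    return firing_rate(FakeMonitor(), width=5., window='gaussian')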
def Milstein_dfree_Stra(f, g, dt=None):
    """Stratonovich stochastic integral.

    The derivative-free Milstein method is an order 1.0 strong Taylor scheme.

    Parameters
    ----------
    f : callable
        The drift coefficient, the deterministic part of the SDE.
    g : callable, float
        The diffusion coefficient, the stochastic part.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    dt = profile.get_dt() if dt is None else dt
    dt_sqrt = np.sqrt(dt)
    f = autojit(f)

    if callable(g):
        g = autojit(g)

        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t - dt, *args) * dt
            g_n = g(y0, t - dt, *args)
            dg = g_n * dW * dt_sqrt
            y_n_bar = y0 + df + g_n * dt_sqrt
            g_n_bar = g(y_n_bar, t, *args)
            extra_term = 0.5 * (g_n_bar - g_n) * (dW * dW * dt_sqrt)
            y1 = y0 + df + dg + extra_term
            return y1
    else:
        assert isinstance(g, (int, float, np.ndarray))

        # with a constant diffusion coefficient the Milstein correction
        # vanishes, and the scheme reduces to Euler-Maruyama
        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t - dt, *args) * dt
            dg = g * dW * dt_sqrt
            y1 = y0 + df + dg
            return y1

    return autojit(int_fg)
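# A minimal usage sketch (illustrative only): one derivative-free Milstein step
# of a hypothetical SDE with multiplicative noise, dy = -y dt + 0.1 y dW. The
# same t - dt convention as above applies.
def _demo_Milstein_dfree_Stra():
    step = Milstein_dfree_Stra(lambda y, t: -y, lambda y, t: 0.1 * y, dt=0.1)
    return step(np.array([1.0]), 0.1)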
def Euler_method(f, g, dt=None):
    """Itô stochastic integral.

    The Euler-Maruyama method is the simplest stochastic numerical
    approximation; it is an order 0.5 strong Taylor scheme. Also named
    ``EM``, ``EM_method``, ``Euler``, ``Euler_Maruyama_method``.

    Parameters
    ----------
    f : callable
        The drift coefficient, the deterministic part of the SDE.
    g : callable, float
        The diffusion coefficient, the stochastic part.
    dt : None, float
        Precision of numerical integration. If ``None``, use ``profile.get_dt()``.

    Returns
    -------
    func : callable
        The one-step numerical integration function.
    """
    dt = profile.get_dt() if dt is None else dt
    dt_sqrt = np.sqrt(dt)
    f = autojit(f)

    if callable(g):
        g = autojit(g)

        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t, *args) * dt
            dg = dt_sqrt * g(y0, t, *args) * dW
            return y0 + df + dg
    else:
        assert isinstance(g, (int, float, np.ndarray))

        def int_fg(y0, t, *args):
            dW = np.random.normal(0.0, 1.0, y0.shape)
            df = f(y0, t, *args) * dt
            dg = dt_sqrt * g * dW
            return y0 + df + dg

    return autojit(int_fg)
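# A minimal usage sketch (illustrative only): Euler-Maruyama with a constant
# float diffusion coefficient, which takes the non-callable branch above.
def _demo_Euler_method():
    step = Euler_method(lambda y, t: -y, 0.2, dt=0.1)
    return step(np.array([1.0]), 0.0)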
def format_delay(delay, dt=None):
    """Format the given delay and get the delay length.

    Parameters
    ----------
    delay : None, int, float
        The delay, in the same unit as ``dt``.
    dt : float, None
        The precision of the numerical integration. If ``None``, use
        ``profile.get_dt()``.

    Returns
    -------
    delay_len : int
        Delay length, i.e., the number of time steps the delay spans,
        plus one slot for the current value.
    """
    if delay is None:
        delay_len = 1
    elif isinstance(delay, (int, float)):
        dt = profile.get_dt() if dt is None else dt
        delay_len = int(np.ceil(delay / dt)) + 1
    else:
        raise ValueError('Unsupported delay type: {}.'.format(type(delay)))
    return delay_len
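# A worked example (illustrative only): with dt = 0.1, a delay of 2.0 spans
# ceil(2.0 / 0.1) = 20 steps, plus one slot for the current value.
def _demo_format_delay():
    assert format_delay(None) == 1
    assert format_delay(2.0, dt=0.1) == 21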
def __init__(self, **kwargs):
    if 'kwargs' in kwargs:
        kwargs.pop('kwargs')
    for k, v in kwargs.items():
        setattr(self, k, v)
    self.post.pre_synapses.append(self)
    self.pre.post_synapses.append(self)

    # check functions
    assert 'update_state' in kwargs, 'Must provide "update_state" function.'

    if 'output_synapse' not in kwargs:
        def f1(syn_state, var_index, neu_state):
            output_idx = var_index[-2]
            neu_state[-1] += syn_state[output_idx[0]][output_idx[1]]

        self.output_synapse = f1

    if 'collect_spike' not in kwargs:
        def f2(syn_state, pre_neu_state, post_neu_state):
            syn_state[0][-1] = pre_neu_state[-3]

        self.collect_spike = f2

    self.update_state = helper.autojit(self.update_state)
    self.output_synapse = helper.autojit(self.output_synapse)
    self.collect_spike = helper.autojit(self.collect_spike)

    # check `name`
    if 'name' not in kwargs:
        global synapse_no
        self.name = "Synapses-{}".format(synapse_no)
        synapse_no += 1

    # check `num`, `num_pre` and `num_post`
    assert 'num' in kwargs, 'Must provide "num" attribute.'
    if 'num_pre' not in kwargs:
        self.num_pre = self.pre.num
    if 'num_post' not in kwargs:
        self.num_post = self.post.num

    # check `delay_len`
    if 'delay_len' not in kwargs:
        if 'delay' not in kwargs:
            raise ValueError('Must define "delay".')
        else:
            dt = kwargs.get('dt', profile.get_dt())
            self.delay_len = format_delay(self.delay, dt)

    # check `var2index`
    if 'var2index' not in kwargs:
        raise ValueError('Must define "var2index".')
    assert isinstance(self.var2index, dict), '"var2index" must be a dict.'

    # "g" is the "delay_idx"; "g_post" is the "output_idx"
    default_variables = [('pre_spike', (0, -1)),
                         ('g', (1, self.delay_len - 1)),
                         ('g_post', (1, 0)), ]
    self.default_variables = default_variables

    for k, _ in default_variables:
        if k in self.var2index:
            raise ValueError('"{}" is a pre-defined variable, '
                             'cannot be defined in "var2index".'.format(k))

    user_defined_variables = sorted(list(self.var2index.items()), key=lambda a: a[1])
    syn_variables = user_defined_variables + default_variables
    var2index_array = np.zeros((len(syn_variables) + 1, 2), dtype=np.int32)
    var2index_array[-1, 0] = self.delay_len
    vars = dict(delay_len=-1)
    for i, (var, index) in enumerate(syn_variables):
        var2index_array[i] = list(index)
        vars[var] = i
    self.var2index = vars
    self.var2index_array = var2index_array
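# A hypothetical construction sketch (the keyword names mirror the checks in
# ``__init__`` above; the concrete objects and ``my_update_state`` are
# illustrative, not part of the library):
#
#     syn = Synapses(pre=lif1, post=lif2, num=lif1.num * lif2.num,
#                    delay=2.0, update_state=my_update_state,
#                    var2index={'s': (0, 0)})
#
# ``var2index`` maps each user-defined variable to its (matrix, row) index in
# the synapse state; ``pre_spike``, ``g`` and ``g_post`` are reserved names
# that are appended automatically.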
def run(self, duration, report=False, inputs=(), repeat=False):
    """Run the simulation for the given duration.

    This function provides the most convenient way to run the network.
    For example:

    >>> # first of all, define the network we want.
    >>> import npbrain as nn
    >>> lif1 = nn.LIF(10, noise=0.2)
    >>> lif2 = nn.LIF(10, noise=0.5)
    >>> syn = nn.VoltageJumpSynapse(lif1, lif2, 1.0, nn.conn.all2all(lif1.num, lif2.num))
    >>> net = Network(syn, lif1, lif2)
    >>> # next, run the network.
    >>> net.run(100.)  # run 100. ms
    >>>
    >>> # if you want to provide input to `lif1`,
    >>> # for example, a constant input `11` (for more complex inputs, please use `input_factory.py`)
    >>> net.run(100., inputs=(lif1, 11.))
    >>>
    >>> # if you want to provide input to `lif1` in the period of 30-50 ms.
    >>> net.run(100., inputs=(lif1, 11., (30., 50.)))
    >>>
    >>> # moreover, if you want to provide input to `lif1` in the period of 30-50 ms,
    >>> # and provide input to `lif2` in the period of 10-100 ms.
    >>> net.run(100., inputs=[(lif1, 11., (30., 50.)), (lif2, -1., (10., 100.))])
    >>>
    >>> # if you want to know the running status in real time.
    >>> net.run(100., report=True)

    Parameters
    ----------
    duration : int, float
        The amount of simulation time to run for.
    report : bool
        Report the progress of the simulation.
    repeat : bool
        Whether to repeatedly run this model. If `repeat=True`, every
        call of this method will re-initialize the object state.
    inputs : list, tuple
        The receivers, external inputs and durations.
    """
    # 1. checking
    # ------------
    self._check_run_order()

    # 2. initialization
    # ------------------

    # time
    dt = profile.get_dt()
    ts = np.arange(self.current_time, self.current_time + duration, dt)
    run_length = len(ts)

    # monitors
    for mon in self.monitors:
        mon.init_state(run_length)

    # neurons
    if repeat:
        if self._neuron_states is None:
            self._neuron_states = [neu.state.copy() for neu in self.neurons]
        else:
            for neu, state in zip(self.neurons, self._neuron_states):
                neu.state = state.copy()

    # synapses
    if repeat:
        if self._synapse_states is None:
            self._synapse_states = []
            for syn in self.synapses:
                state = tuple(st.copy() for st in syn.state)
                self._synapse_states.append([state, syn.var2index_array.copy()])
        else:
            for syn, (state, var_index) in zip(self.synapses, self._synapse_states):
                syn.state = tuple(st.copy() for st in state)
                syn.var2index_array = var_index.copy()

    # 3. format external inputs
    # --------------------------
    iterable_inputs, fixed_inputs, no_inputs = self._format_inputs_and_receiver(inputs, duration)

    # 4. run
    # ---------

    # initialize
    if report:
        t0 = time.time()
    self._input(0, iterable_inputs, fixed_inputs, no_inputs)
    self._step(ts[0], 0)

    # record time
    if report:
        print('Compilation used {:.4f} s.'.format(time.time() - t0))
        print("Start running ...")
        report_gap = int(run_length / 10)
        t0 = time.time()

    # run
    for run_idx in range(1, run_length):
        t = ts[run_idx]
        self._input(run_idx, iterable_inputs, fixed_inputs, no_inputs)
        self._step(t, run_idx)
        if report and ((run_idx + 1) % report_gap == 0):
            percent = (run_idx + 1) / run_length * 100
            print('Run {:.1f}% using {:.3f} s.'.format(percent, time.time() - t0))
    if report:
        print('Simulation is done.')

    # 5. Finally
    # -----------
    # advance the network clock by the simulated duration
    self.current_time += duration
def _format_inputs_and_receiver(self, inputs, duration):
    dt = profile.get_dt()

    # format inputs and receivers
    if len(inputs) > 1 and not isinstance(inputs[0], (list, tuple)):
        if isinstance(inputs[0], Neurons):
            inputs = [inputs]
        else:
            raise ValueError('Unknown input structure.')

    # ---------------------
    # classify input types
    # ---------------------
    # 1. iterable inputs
    # 2. fixed inputs
    iterable_inputs = []
    fixed_inputs = []
    neuron_with_inputs = []
    for input_ in inputs:
        # get "receiver", "input", "duration"
        if len(input_) == 2:
            obj, Iext = input_
            dur = (0, duration)
        elif len(input_) == 3:
            obj, Iext, dur = input_
        else:
            raise ValueError('Each input must be "(receiver, input)" or '
                             '"(receiver, input, duration)".')
        err = 'You can assign inputs only for added object. "{}" is not in the network.'
        if isinstance(obj, str):
            try:
                obj = self._objsets[obj]
            except KeyError:
                raise ValueError(err.format(obj))
        assert isinstance(obj, Neurons), "You can assign inputs only for Neurons."
        assert obj in self.objects, err.format(obj)
        assert len(dur) == 2, "Must provide the start and the end simulation time."
        assert 0 <= dur[0] < dur[1] <= duration
        dur = (int(dur[0] / dt), int(dur[1] / dt))
        neuron_with_inputs.append(obj)

        # judge the type of the inputs.
        if isinstance(Iext, (int, float)):
            Iext = np.ones(obj.num) * Iext
            fixed_inputs.append([obj, Iext, dur])
            continue
        size = np.shape(Iext)[0]
        run_length = dur[1] - dur[0]
        if size != run_length:
            if size == 1:
                Iext = np.ones(obj.num) * Iext
            elif size == obj.num:
                Iext = Iext
            else:
                raise ValueError('Wrong size of inputs for', obj)
            fixed_inputs.append([obj, Iext, dur])
        else:
            input_size = np.size(Iext[0])
            err = 'The input size "{}" does not match the neuron ' \
                  'group size "{}".'.format(input_size, obj.num)
            assert input_size == 1 or input_size == obj.num, err
            iterable_inputs.append([obj, Iext, dur])

    # 3. no inputs
    no_inputs = []
    for neu in self.neurons:
        if neu not in neuron_with_inputs:
            no_inputs.append(neu)
    return iterable_inputs, fixed_inputs, no_inputs