def run_integrator(method, show=False, tol=0.001, adaptive=True):
    """Integrate the Lorenz system and return the monitored trajectory.

    Parameters
    ----------
    method : callable
        Integrator factory; called as
        ``method(f_lorenz, adaptive=..., tol=..., show_code=True)``.
    show : bool
        If True, plot the 3D trajectory and the per-step dt history.
    tol : float
        Error tolerance forwarded to the adaptive integrator.
    adaptive : bool
        Whether the integrator adapts its step size.

    Returns
    -------
    tuple of numpy.ndarray
        ``(mon_x, mon_y, mon_z, mon_dt)`` — flattened state histories and the
        step sizes actually taken.
    """
    f_integral = method(f_lorenz, adaptive=adaptive, tol=tol, show_code=True)
    x, y, z = bm.ones(1), bm.ones(1), bm.ones(1)
    dt = bm.ones(1) * 0.01

    def f(t):
        # One integration step; the adaptive integrator also returns the next dt.
        x.value, y.value, z.value, dt[:] = f_integral(x, y, z, t, dt=dt.value)

    f_scan = bm.make_loop(f, dyn_vars=[x, y, z, dt], out_vars=[x, y, z, dt])

    # NOTE(review): relies on module-level `duration` and `_dt` — confirm they
    # are defined before this function is called.
    times = bm.arange(0, duration, _dt)
    mon_x, mon_y, mon_z, mon_dt = f_scan(times.value)
    mon_x = np.array(mon_x).flatten()
    mon_y = np.array(mon_y).flatten()
    mon_z = np.array(mon_z).flatten()
    mon_dt = np.array(mon_dt).flatten()

    if show:
        fig = plt.figure()
        # `fig.gca(projection='3d')` was removed in Matplotlib 3.6;
        # `add_subplot` is the supported way to request a 3D axes.
        ax = fig.add_subplot(projection='3d')
        plt.plot(mon_x, mon_y, mon_z)
        ax.set_xlabel('x')
        # BUG FIX: the original called set_xlabel() three times, so the
        # y- and z-axes were never labelled.
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        plt.show()

        plt.plot(mon_dt)
        plt.show()

    return mon_x, mon_y, mon_z, mon_dt
def find_fixed_points():
    """Find the fixed points of a 1D CANN with a slow-point finder and save them.

    Writes the surviving fixed points to ``fps_output_fn`` and prints both the
    points and their residual losses.
    """
    network = CANN1D(num=512, k=k, A=A, a=a)

    # Seed the search with bump states centred across the whole feature range,
    # lightly jittered so the candidates are not exactly on the attractor.
    positions = bm.arange(-bm.pi, bm.pi, 0.01).reshape((-1, 1))
    seeds = network.get_stimulus_by_pos(positions)
    seeds += bm.random.normal(0., 0.01, seeds.shape)

    fp_finder = bp.analysis.SlowPointFinder(f_cell=network.cell)
    fp_finder.find_fps_with_opt_solver(seeds)
    fp_finder.filter_loss(1e-5)  # keep only well-converged points
    fp_finder.keep_unique()      # drop near-duplicate solutions

    np.save(fps_output_fn, fp_finder.fixed_points)
    print(fp_finder.fixed_points)
    print(fp_finder.losses)
def test_fix_type(self):
    """A constant scalar input should grow the monitored value by 1 per step,
    for every combination of runner class and jit setting."""
    total_time = 10.
    step_size = 0.1
    n_steps = int(total_time / step_size)
    # Expected monitor history: [1, 2, ..., n_steps] duplicated over 2 columns.
    expected = bm.repeat(bm.arange(n_steps) + 1, 2).reshape((n_steps, 2))
    for jit in (True, False):
        for runner_cls in (bp.ReportRunner, bp.StructRunner):
            ds = ExampleDS()
            runner = runner_cls(ds, inputs=('o', 1.), monitors=['o'],
                                dyn_vars=ds.vars(), jit=jit, dt=step_size)
            runner(total_time)
            assert bm.array_equal(runner.mon.o, expected)
def test_syn2post_softmax(self):
    """Print syn2post_softmax output beside a manually computed per-segment softmax."""
    data = bm.arange(5)
    segment_ids = bm.array([0, 0, 1, 1, 2])
    f_ans = bm.syn2post_softmax(data, segment_ids, 3)
    # Manual softmax within each segment: {0, 1}, {2, 3}, {4}.
    seg01 = jnp.exp(data[0]) + jnp.exp(data[1])
    seg23 = jnp.exp(data[2]) + jnp.exp(data[3])
    true_ans = bm.asarray([
        jnp.exp(data[0]) / seg01,
        jnp.exp(data[1]) / seg01,
        jnp.exp(data[2]) / seg23,
        jnp.exp(data[3]) / seg23,
        jnp.exp(data[4]) / jnp.exp(data[4]),
    ])
    print()
    print(bm.asarray(f_ans))
    print(true_ans)
    print(f_ans == true_ans)
    # NOTE(review): the equality assertion was deliberately left disabled in
    # the original; this remains a print-only smoke test.
    data = bm.arange(5)
    segment_ids = bm.array([0, 0, 1, 1, 2])
    # Same data reduced into 4 post-synaptic slots (one slot stays empty).
    print(bm.syn2post_softmax(data, segment_ids, 4))
def __init__(self, size, delay, dtype=None, dt=None, **kwargs):
    """Build a constant-delay ring buffer.

    Parameters
    ----------
    size : int, tuple, list
        Shape of the delayed data at each time step.
    delay : int, float, or 1D array
        Delay length in time units. A scalar gives a uniform delay; an
        array gives one delay per element (1D data only).
    dtype : optional
        Data type of the delay buffer.
    dt : float, optional
        Numerical time step; defaults to the global ``bm.get_dt()``.
    **kwargs
        Forwarded to the parent constructor.
    """
    # Time step: fall back to the global setting when not given explicitly.
    self.dt = bm.get_dt() if dt is None else dt

    # Normalize `size` to a tuple of ints.
    if isinstance(size, int):
        size = (size, )
    if not isinstance(size, (tuple, list)):
        raise ModelBuildError(f'"size" must a tuple/list of int, but we got {type(size)}: {size}')
    self.size = tuple(size)

    # Requested delay length (in time units, not steps).
    self.delay = delay

    # Buffer layout and read/write indices.
    if isinstance(delay, (int, float)):  # uniform delay
        self.uniform_delay = True
        # Number of buffered steps; +1 so the current step is also stored.
        # NOTE(review): `pm` appears to be a math-like module (pm.ceil) — confirm its import.
        self.num_step = int(pm.ceil(delay / self.dt)) + 1
        # Scalar read/write cursors into the ring buffer.
        self.out_idx = bm.Variable(bm.array([0], dtype=bm.uint32))
        self.in_idx = bm.Variable(bm.array([self.num_step - 1], dtype=bm.uint32))
        # Ring buffer: (num_step, *size).
        self.data = bm.Variable(bm.zeros((self.num_step, ) + self.size, dtype=dtype))
    else:  # non-uniform delay
        self.uniform_delay = False
        if not len(self.size) == 1:
            raise NotImplementedError(f'Currently, BrainPy only supports 1D heterogeneous '
                                      f'delays, while we got the heterogeneous delay with '
                                      f'{len(self.size)}-dimensions.')
        self.num = size2len(size)
        if bm.ndim(delay) != 1:
            raise ModelBuildError(f'Only support a 1D non-uniform delay. '
                                  f'But we got {delay.ndim}D: {delay}')
        if delay.shape[0] != self.size[0]:
            raise ModelBuildError(f"The first shape of the delay time size must "
                                  f"be the same with the delay data size. But "
                                  f"we got {delay.shape[0]} != {self.size[0]}")
        # Convert per-element delays from time units to (rounded) step counts.
        delay = bm.around(delay / self.dt)
        # Diagonal element indices, used to address each element's own column.
        self.diag = bm.array(bm.arange(self.num), dtype=bm.int_)
        # Per-element buffered step counts; +1 keeps the current step too.
        self.num_step = bm.array(delay, dtype=bm.uint32) + 1
        # Per-element read/write cursors.
        self.in_idx = bm.Variable(self.num_step - 1)
        self.out_idx = bm.Variable(bm.zeros(self.num, dtype=bm.uint32))
        # One shared buffer sized for the longest delay: (max_step, *size).
        self.data = bm.Variable(bm.zeros((self.num_step.max(), ) + size, dtype=dtype))
    super(ConstantDelay, self).__init__(**kwargs)
def __call__(self, duration, start_t=None):
    """Run the simulation step by step and return the wall-clock time spent.

    Parameters
    ----------
    duration : float, int, tuple, list
        The running duration.
    start_t : float, optional
        The start simulation time; defaults to where the previous run ended
        (or 0. on the very first call).

    Returns
    -------
    running_time : float
        The total running time.
    """
    # Resolve the simulation start time.
    if start_t is None:
        start_t = 0. if self._start_t is None else self._start_t
    end_t = start_t + duration

    # All time points to be simulated.
    times = math.arange(start_t, end_t, self.dt)

    # Reset every monitored item to an empty history before stepping.
    for key in self.mon.item_contents.keys():
        self.mon.item_contents[key] = []

    # Step through all time points with a progress bar, timing the loop.
    start_clock = time.time()
    pbar = tqdm.auto.tqdm(total=times.size)
    pbar.set_description(
        f"Running a duration of {round(float(duration), 3)} ({times.size} steps)",
        refresh=True)
    for i in range(times.size):
        self._step((times[i], self.dt))
        pbar.update()
    pbar.close()
    elapsed = time.time() - start_clock

    # Convert the collected monitor histories into arrays and advance the clock.
    self.mon.ts = times
    for key, val in self.mon.item_contents.items():
        self.mon.item_contents[key] = math.asarray(val)
    self._start_t = end_t
    if self.numpy_mon_after_run:
        self.mon.numpy()
    return elapsed
def __call__(self, duration, start_t=None):
    """Run the compiled step function over the whole duration at once.

    Parameters
    ----------
    duration : float, int, tuple, list
        The running duration.
    start_t : float, optional
        The start simulation time; defaults to where the previous run ended
        (or 0. on the very first call).

    Returns
    -------
    running_time : float
        The total running time.
    """
    # Reset the dynamical-argument cursor when dynamic arguments are present.
    if len(self._dyn_args) > 0:
        self.dyn_vars['_idx'][0] = 0

    # Resolve the simulation start time.
    if start_t is None:
        start_t = 0. if self._start_t is None else float(self._start_t)
    end_t = float(start_t + duration)

    # Time points and a matching vector of step sizes for the scan.
    times = math.arange(start_t, end_t, self.dt)
    dt_vector = math.ones_like(times) * self.dt

    # Run everything in one compiled call, timing only the computation.
    if self.progress_bar:
        self._pbar = tqdm.auto.tqdm(total=times.size)
        self._pbar.set_description(
            f"Running a duration of {round(float(duration), 3)} ({times.size} steps)",
            refresh=True)
    start_clock = time.time()
    histories = self.step_func([times.value, dt_vector.value])
    elapsed = time.time() - start_clock
    if self.progress_bar:
        self._pbar.close()

    # Hand the results to the post-processing hook and advance the clock.
    self._post(times, histories)
    self._start_t = end_t
    if self.numpy_mon_after_run:
        self.mon.numpy()
    return elapsed
# Plot the eigenvalue spectrum of the effective recurrent matrix
# W_rr + W_ro @ W_or (recurrent weights plus the output-feedback loop).
evals, _ = np.linalg.eig((net.w_rr + net.w_ro @ net.w_or).numpy())
plt.subplot(224)
plt.plot(np.real(evals), np.imag(evals), 'o', color='orange')
# x_circ/y_circ presumably trace the unit circle as a stability reference —
# TODO confirm where they are defined.
plt.plot(x_circ, y_circ, 'k')
plt.plot(x_circ, -y_circ, 'k')
plt.axis('equal')
plt.title('Eigenvalues of W_rr + W_ro * W_or')
plt.tight_layout()
plt.show()

# %%
# Simulation grid: T time units at resolution dt, driven by a zero input stream.
dt = 0.1
T = 30
times = bm.arange(0, T, dt)
xs = bm.zeros((times.shape[0], 1))

# %% [markdown]
# Generate some target data by running an ESN, and just grabbing hidden dimensions as the targets of the FORCE trained network.

# %%
esn1 = EchoStateNet(num_input=1, num_hidden=500, num_output=20, dt=dt, g=1.8)
rs, ys = esn1.simulate(xs)
targets = rs[:, 0:esn1.num_output]  # This will be the training data for the trained ESN
# Offset each target dimension vertically (by 2 per dimension) so traces don't overlap.
plt.plot(times, targets + 2 * np.arange(0, esn1.num_output), 'g')
plt.xlim((0, T))
plt.ylabel('Dimensions')
plt.xlabel('Time')
plt.show()
def test_syn2post_mean(self):
    """syn2post_mean should average the values that share a post-synaptic id."""
    values = bm.arange(5)
    post_ids = bm.array([0, 0, 1, 1, 2])
    # Segments: mean(0, 1) = 0.5, mean(2, 3) = 2.5, mean(4) = 4.
    reduced = bm.syn2post_mean(values, post_ids, 3)
    self.assertTrue(bm.array_equal(reduced, bm.asarray([0.5, 2.5, 4.])))
def test_syn2post_prod(self):
    """syn2post_prod should multiply the values that share a post-synaptic id."""
    values = bm.arange(5)
    post_ids = bm.array([0, 0, 1, 1, 2])
    # Segments: 0 * 1 = 0, 2 * 3 = 6, 4 alone = 4.
    reduced = bm.syn2post_prod(values, post_ids, 3)
    self.assertTrue(bm.array_equal(reduced, bm.asarray([0, 6, 4])))
def batch_train(start_i, num_batch):
    """Run `train` over a contiguous batch of indices via a compiled loop.

    Returns whatever the compiled loop collects from `train`'s return values.
    """
    indices = bm.arange(start_i, start_i + num_batch)
    looped_train = bm.make_loop(train, dyn_vars=dyn_vars, has_return=True)
    return looped_train(indices)