class MassTest(NeuralMass):
    """Minimal NeuralMass subclass used as a fixture to exercise base-class machinery."""

    # parameters that must be supplied on construction
    required_params = ["a", "b"]
    num_state_variables = 1
    num_noise_variables = 2
    # extra helper symbol names the mass declares
    helper_variables = ["helper_test"]
    # names of python callbacks the mass exposes
    python_callbacks = ["test_callback"]
    # default noise: one ZeroInput per noise variable
    _noise_input = [ZeroInput(), ZeroInput()]
class InhMassTest(NeuralMass):
    """Minimal inhibitory test mass with a single state variable ``q``."""

    label = INH
    required_params = ["a", "b"]
    # coupling variable name keyed by state variable index
    coupling_variables = {0: "coupling_INH"}
    state_variable_names = ["q"]
    num_state_variables = 1
    num_noise_variables = 2
    mass_type = INH
    # default noise: one ZeroInput per noise variable
    _noise_input = [ZeroInput(), ZeroInput()]
class ExcMassTest(NeuralMass):
    """Minimal excitatory test mass with a single state variable ``q``."""

    label = EXC
    required_params = ["a", "b"]
    # coupling variable name keyed by state variable index
    coupling_variables = {0: "coupling_EXC"}
    state_variable_names = ["q"]
    num_state_variables = 1
    num_noise_variables = 2
    mass_type = EXC
    # default noise: one ZeroInput per noise variable
    _noise_input = [ZeroInput(), ZeroInput()]
def test_noise_input(self):
    """Noise inputs on a node default to ZeroInput and can be replaced wholesale."""
    node = self._create_node()
    # every default noise source is a ZeroInput
    self.assertTrue(all(isinstance(n, ZeroInput) for n in node.noise_input))
    replacement = [
        OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0),
        ZeroInput(),
        OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0),
        ZeroInput(),
    ]
    node.noise_input = replacement
    # OU processes landed at even indices, ZeroInputs at odd ones
    for even_idx in (0, 2):
        self.assertTrue(isinstance(node.noise_input[even_idx], OrnsteinUhlenbeckProcess))
    for odd_idx in (1, 3):
        self.assertTrue(isinstance(node.noise_input[odd_idx], ZeroInput))
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native FitzHugh-Nagumo model.
    """
    # run this model - default is diffusive coupling
    fhn_multi = FitzHughNagumoNetwork(self.SC, self.DELAYS, seed=SEED)
    zero_noise = ZeroInput(fhn_multi.num_noise_variables).as_array(DURATION, DT)
    multi_result = fhn_multi.run(DURATION, DT, zero_noise, backend="numba")
    # run neurolib's model
    fhn_neurolib = FHNModel(Cmat=self.SC, Dmat=self.DELAYS, seed=SEED)
    fhn_neurolib.params["duration"] = DURATION
    fhn_neurolib.params["dt"] = DT
    # there is no "global coupling" parameter in MultiModel
    fhn_neurolib.params["K_gl"] = 1.0
    # delays <-> length matrix
    fhn_neurolib.params["signalV"] = 1.0
    fhn_neurolib.params["coupling"] = "diffusive"
    fhn_neurolib.params["sigma_ou"] = 0.0
    # match initial state: even entries -> x, odd entries -> y
    fhn_neurolib.params["xs_init"] = fhn_multi.initial_state[::2][:, np.newaxis]
    fhn_neurolib.params["ys_init"] = fhn_multi.initial_state[1::2][:, np.newaxis]
    fhn_neurolib.run()
    # both runs should be highly correlated
    for var in NEUROLIB_VARIABLES_TO_TEST:
        corr_mat = np.corrcoef(fhn_neurolib[var], multi_result[var].values.T)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_run(self):
    """Run the RWW network on every backend and check the outputs agree."""
    rww = ReducedWongWangNetwork(self.SC, self.DELAYS, seed=SEED)
    results_per_backend = []
    for backend, noise_func in BACKENDS_TO_TEST.items():
        noise = noise_func(ZeroInput(rww.num_noise_variables), DURATION, DT)
        result = rww.run(DURATION, DT, noise, backend=backend)
        self.assertTrue(isinstance(result, xr.Dataset))
        self.assertEqual(len(result), rww.num_state_variables / rww.num_nodes)
        expected_shape = (int(DURATION / DT), rww.num_nodes)
        self.assertTrue(all(result[name].shape == expected_shape for name in result))
        results_per_backend.append(result)
    # test results are the same from different backends
    for state_var in results_per_backend[0]:
        stacked = np.vstack(
            [res[state_var].values.flatten().astype(float) for res in results_per_backend]
        )
        corr_mat = np.corrcoef(stacked)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_run(self):
    """Run the FHN node on every backend and check the outputs agree."""
    fhn = self._create_node()
    collected = []
    for backend, noise_func in BACKENDS_TO_TEST.items():
        result = fhn.run(
            DURATION,
            DT,
            noise_func(ZeroInput(fhn.num_noise_variables), DURATION, DT),
            backend=backend,
        )
        self.assertTrue(isinstance(result, xr.Dataset))
        self.assertEqual(len(result), fhn.num_state_variables)
        names = fhn.state_variable_names[0]
        self.assertTrue(all(name in result for name in names))
        expected_shape = (int(DURATION / DT), 1)
        self.assertTrue(all(result[name].shape == expected_shape for name in names))
        collected.append(result)
    # test results are the same from different backends
    for state_var in collected[0]:
        stacked = np.vstack(
            [res[state_var].values.flatten().astype(float) for res in collected]
        )
        self.assertTrue(np.greater(np.corrcoef(stacked), CORR_THRESHOLD).all())
def test_run(self):
    """Run the thalamic node on every backend and check the outputs agree."""
    thlm = self._create_node()
    collected = []
    for backend, noise_func in BACKENDS_TO_TEST.items():
        result = thlm.run(
            DURATION,
            DT,
            noise_func(ZeroInput(thlm.num_noise_variables), DURATION, DT),
            backend=backend,
        )
        self.assertTrue(isinstance(result, xr.Dataset))
        self.assertEqual(len(result), thlm.num_state_variables)
        names = thlm.state_variable_names[0]
        self.assertTrue(all(name in result for name in names))
        expected_shape = (int(DURATION / DT), 1)
        self.assertTrue(all(result[name].shape == expected_shape for name in names))
        collected.append(result)
    # test results are the same from different backends
    for state_var in collected[0]:
        stacked = np.vstack(
            [res[state_var].values.flatten().astype(float) for res in collected]
        )
        corr_mat = np.corrcoef(stacked)
        # some variables have zero variance (i.e. excitatory synaptic
        # activity to the TCR - it does not have any in isolated mode
        # without noise)
        if not np.any(np.isnan(corr_mat)):
            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native Wilson-Cowan model.

    Fix: removed leftover debug ``print`` that spammed test output.
    """
    wc_multi = WilsonCowanNetwork(self.SC, self.DELAYS)
    multi_result = wc_multi.run(
        DURATION,
        DT,
        ZeroInput(wc_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    wc_neurolib = WCModel(Cmat=self.SC, Dmat=self.DELAYS, seed=SEED)
    wc_neurolib.params["duration"] = DURATION
    wc_neurolib.params["dt"] = DT
    # there is no "global coupling" parameter in MultiModel
    wc_neurolib.params["K_gl"] = 1.0
    # delays <-> length matrix
    wc_neurolib.params["signalV"] = 1.0
    wc_neurolib.params["sigma_ou"] = 0.0
    # match initial state
    wc_neurolib.params["exc_init"] = wc_multi.initial_state[::2][:, np.newaxis]
    wc_neurolib.params["inh_init"] = wc_multi.initial_state[1::2][:, np.newaxis]
    wc_neurolib.run()
    for var_multi, var_neurolib in NEUROLIB_VARIABLES_TO_TEST:
        for node_idx in range(len(wc_multi)):
            neurolib_ts = wc_neurolib[var_neurolib][node_idx, :]
            multi_ts = multi_result[var_multi].values.T[node_idx, :]
            # skip traces with NaNs - corrcoef would be NaN as well
            if np.isnan(neurolib_ts).any() or np.isnan(multi_ts).any():
                continue
            corr_mat = np.corrcoef(neurolib_ts, multi_ts)
            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_run(self):
    """
    Run the ALN network on every backend and check the outputs agree.

    Fix: removed leftover debug ``print`` that spammed test output.
    """
    aln = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED)
    all_results = []
    for backend, noise_func in BACKENDS_TO_TEST.items():
        result = aln.run(
            DURATION,
            DT,
            noise_func(ZeroInput(aln.num_noise_variables), DURATION, DT),
            backend=backend,
        )
        self.assertTrue(isinstance(result, xr.Dataset))
        self.assertEqual(len(result), aln.num_state_variables / aln.num_nodes)
        self.assertTrue(
            all(
                result[result_].shape == (int(DURATION / DT), aln.num_nodes)
                for result_ in result
            )
        )
        all_results.append(result)
    # test results are the same from different backends
    for state_var in all_results[0]:
        all_ts = np.vstack(
            [result[state_var].values.flatten().astype(float) for result in all_results]
        )
        # NaNs make corrcoef undefined - skip those variables
        if np.isnan(all_ts).any():
            continue
        corr_mat = np.corrcoef(all_ts)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_backend_value_error(self):
    """An unknown backend name must raise ValueError."""
    system = BackendTestingHelper()
    noise = ZeroInput().as_cubic_splines(self.DURATION, self.DT)
    with pytest.raises(ValueError):
        _ = system.run(self.DURATION, self.DT, noise, backend="wrong")
def _run_mass(self, node, duration, dt):
    """Integrate a single mass in isolation (zero coupling, zero noise) via jitcdde."""
    # all required couplings are clamped to zero
    zero_couplings = {coupling: 0.0 for coupling in node.required_couplings}
    noise = ZeroInput(num_iid=node.num_noise_variables).as_cubic_splines(duration, dt)
    dde = jitcdde_input(node._derivatives(zero_couplings), input=noise)
    dde.constant_past(np.array(node.initial_state))
    dde.adjust_diff()
    time_points = np.arange(dt, duration + dt, dt)
    # stack per-timestep states into a (time, state) array
    return np.vstack([dde.integrate(t) for t in time_points])
def test_set_noise_input(self):
    """Noise inputs on a mass default to ZeroInput and can be replaced."""
    mass = MassTest(self.PARAMS)
    # defaults are all ZeroInput
    self.assertTrue(all(isinstance(n, ZeroInput) for n in mass.noise_input))
    mass.noise_input = [OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0), ZeroInput()]
    # replacement took effect positionally
    self.assertTrue(isinstance(mass.noise_input[0], OrnsteinUhlenbeckProcess))
    self.assertTrue(isinstance(mass.noise_input[1], ZeroInput))
def test_return_raw_and_xarray(self):
    """run() returns an xr.Dataset or a raw (times, array) pair per `return_xarray`."""
    system = BackendTestingHelper()
    results_xr = system.run(
        self.DURATION,
        self.DT,
        ZeroInput().as_cubic_splines(self.DURATION, self.DT),
        backend="jitcdde",
        return_xarray=True,
    )
    self.assertTrue(isinstance(results_xr, xr.Dataset))
    times, results_raw = system.run(
        self.DURATION,
        self.DT,
        ZeroInput().as_cubic_splines(self.DURATION, self.DT),
        backend="jitcdde",
        return_xarray=False,
    )
    self.assertTrue(isinstance(times, np.ndarray))
    self.assertTrue(isinstance(results_raw, np.ndarray))
    # NOTE(review): raw times appear to be a factor 1000 off the xarray time
    # axis (ms vs s presumably) - confirm against the backend implementation
    np.testing.assert_equal(times / 1000.0, results_xr.time)
    np.testing.assert_equal(results_raw.squeeze(), results_xr["y"].values.squeeze())
def test_jitcdde_other_features(self):
    """Smoke-test auxiliary jitcdde backend methods after a normal run.

    Order matters: each call operates on the integrator state left by the
    previous one; none of the return values are checked.
    """
    system = BackendTestingHelper()
    _ = system.run(self.DURATION, self.DT, ZeroInput().as_cubic_splines(self.DURATION, self.DT), backend="jitcdde")
    # internal consistency check of the compiled DDE system
    system.backend_instance._check()
    system.backend_instance.dde_system.reset_integrator()
    system.backend_instance._integrate_blindly(system.max_delay)
    system.backend_instance.dde_system.purge_past()
    # past state as nodes x time
    system.backend_instance._set_past_from_vector(np.random.rand(1, 4), dt=self.DT)
    # remove temporary compilation artifacts
    system.clean()
def test_run_jitcdde_vector_past(self):
    """jitcdde backend accepts a full past vector (nodes x time) as initial state."""
    system = BackendTestingHelper()
    system.initial_state = np.random.rand(1, 4)
    results = system.run(
        self.DURATION,
        self.DT,
        ZeroInput().as_cubic_splines(self.DURATION, self.DT),
        backend="jitcdde",
    )
    self.assertTrue(isinstance(results, xr.Dataset))
    self.assertEqual(len(results), 1)
    first_var = system.state_variable_names[0][0]
    self.assertTupleEqual(results[first_var].shape, (int(self.DURATION / self.DT), 1))
    self.assertTrue(all(dim in results.dims for dim in ["time", "node"]))
def test_run_numba(self):
    """Numba backend returns a well-formed Dataset and keeps custom attrs."""
    system = BackendTestingHelper()
    results = system.run(
        self.DURATION,
        self.DT,
        ZeroInput().as_array(self.DURATION, self.DT),
        backend="numba",
    )
    results.attrs = self.EXTRA_ATTRS
    # assert type, length and shape of results
    self.assertTrue(isinstance(results, xr.Dataset))
    self.assertEqual(len(results), 1)
    first_var = system.state_variable_names[0][0]
    self.assertTupleEqual(results[first_var].shape, (int(self.DURATION / self.DT), 1))
    self.assertTrue(all(dim in results.dims for dim in ["time", "node"]))
    self.assertDictEqual(results.attrs, self.EXTRA_ATTRS)
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native ALN model.
    """
    # run this model
    aln_multi = self._create_node()
    multi_result = aln_multi.run(
        DURATION,
        DT,
        ZeroInput(aln_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    aln_neurolib = ALNModel(seed=SEED)
    aln_neurolib.params["duration"] = DURATION
    aln_neurolib.params["dt"] = DT
    aln_neurolib.params["mue_ext_mean"] = 0.0
    aln_neurolib.params["mui_ext_mean"] = 0.0
    aln_neurolib.run()
    # both runs should be highly correlated
    for var_multi, var_neurolib in NEUROLIB_VARIABLES_TO_TEST:
        corr_mat = np.corrcoef(aln_neurolib[var_neurolib], multi_result[var_multi].values.T)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native FitzHugh-Nagumo model.
    """
    # run this model
    fhn_multi = self._create_node()
    multi_result = fhn_multi.run(
        DURATION,
        DT,
        ZeroInput(fhn_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    fhn = FHNModel(seed=SEED)
    fhn.params["duration"] = DURATION
    fhn.params["dt"] = DT
    fhn.run()
    # both runs should be highly correlated
    for var in NEUROLIB_VARIABLES_TO_TEST:
        corr_mat = np.corrcoef(fhn[var], multi_result[var].values.T)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native Wilson-Cowan model.
    """
    # run this model
    wc_multi = self._create_node()
    multi_result = wc_multi.run(
        DURATION,
        DT,
        ZeroInput(wc_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    wc_neurolib = WCModel(seed=SEED)
    wc_neurolib.params["duration"] = DURATION
    wc_neurolib.params["dt"] = DT
    # match initial state
    wc_neurolib.params["exc_init"] = np.array([[wc_multi.initial_state[0]]])
    wc_neurolib.params["inh_init"] = np.array([[wc_multi.initial_state[1]]])
    wc_neurolib.run()
    # both runs should be highly correlated
    for var_multi, var_neurolib in NEUROLIB_VARIABLES_TO_TEST:
        corr_mat = np.corrcoef(wc_neurolib[var_neurolib], multi_result[var_multi].values.T)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native ALN model. Marked with xfail, since
    sometimes fail on specific python version on Linux, no idea why, but the
    model works...

    Fix: removed leftover debug ``print`` that spammed test output.
    """
    aln_multi = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED)
    multi_result = aln_multi.run(
        DURATION,
        DT,
        ZeroInput(aln_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    aln_neurolib = ALNModel(Cmat=self.SC, Dmat=self.DELAYS, seed=SEED)
    aln_neurolib.params["duration"] = DURATION
    aln_neurolib.params["dt"] = DT
    # there is no "global coupling" parameter in MultiModel
    aln_neurolib.params["K_gl"] = 1.0
    # delays <-> length matrix
    aln_neurolib.params["signalV"] = 1.0
    aln_neurolib.params["sigma_ou"] = 0.0
    aln_neurolib.params["mue_ext_mean"] = 0.0
    aln_neurolib.params["mui_ext_mean"] = 0.0
    # match initial state at least for current - this seems to be enough
    aln_neurolib.params["mufe_init"] = np.array(
        [aln_multi[0][0].initial_state[0], aln_multi[1][0].initial_state[0]]
    )
    aln_neurolib.params["mufi_init"] = np.array(
        [aln_multi[0][1].initial_state[0], aln_multi[1][1].initial_state[0]]
    )
    aln_neurolib.run()
    for var_multi, var_neurolib in NEUROLIB_VARIABLES_TO_TEST:
        for node_idx in range(len(aln_multi)):
            corr_mat = np.corrcoef(
                aln_neurolib[var_neurolib][node_idx, :],
                multi_result[var_multi].values.T[node_idx, :],
            )
            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_save_pickle(self):
    """
    Testing for saver done here since we have a model to integrate so it's
    easy.
    """
    system = BackendTestingHelper()
    # add attributes to test saving them
    results = system.run(
        self.DURATION, self.DT, ZeroInput().as_cubic_splines(self.DURATION, self.DT)
    )
    results.attrs = self.EXTRA_ATTRS
    # save to pickle
    pickle_path = os.path.join(self.TEST_DIR, "pickle_test")
    save_to_pickle(results, pickle_path)
    pickle_path += ".pkl"
    self.assertTrue(os.path.exists(pickle_path))
    # load and check
    with open(pickle_path, "rb") as f:
        loaded = pickle.load(f)
    xr.testing.assert_equal(results, loaded)
    self.assertDictEqual(loaded.attrs, self.EXTRA_ATTRS)
def test_compare_w_neurolib_native_model(self):
    """
    Compare with neurolib's native thalamic model.
    """
    # run this model
    thalamus_multi = self._create_node()
    multi_result = thalamus_multi.run(
        DURATION,
        DT,
        ZeroInput(thalamus_multi.num_noise_variables).as_array(DURATION, DT),
        backend="numba",
    )
    # run neurolib's model
    thlm_neurolib = ThalamicMassModel()
    thlm_neurolib.params["duration"] = DURATION
    thlm_neurolib.params["dt"] = DT
    thlm_neurolib.params["V_t_init"] = np.array([-70])
    thlm_neurolib.params["V_r_init"] = np.array([-70])
    thlm_neurolib.run()
    # both runs should be highly correlated
    for var_multi, var_neurolib in NEUROLIB_VARIABLES_TO_TEST:
        corr_mat = np.corrcoef(thlm_neurolib[var_neurolib], multi_result[var_multi].values.T)
        self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
def test_save_netcdf(self):
    """
    Testing for saver done here since we have a model to integrate so it's
    easy.
    """
    system = BackendTestingHelper()
    results = system.run(
        self.DURATION, self.DT, ZeroInput().as_cubic_splines(self.DURATION, self.DT)
    )
    results.attrs = self.EXTRA_ATTRS
    # save to netcdf (data file + sidecar json with attributes)
    nc_path = os.path.join(self.TEST_DIR, "netcdf_test")
    save_to_netcdf(results, nc_path)
    # actual data
    self.assertTrue(os.path.exists(nc_path + ".nc"))
    # metadata
    self.assertTrue(os.path.exists(nc_path + ".json"))
    # load and check
    loaded = xr.load_dataset(nc_path + ".nc")
    with open(nc_path + ".json", "r") as f:
        loaded.attrs = json.load(f)
    xr.testing.assert_equal(results, loaded)
    self.assertDictEqual(loaded.attrs, self.EXTRA_ATTRS)