def compute_bTE_on_existing_links(time_series, delay_matrices,
                                  node_dynamics_model, history_target,
                                  history_source):
    """Compute empirical bivariate TE on every existing network link.

    A link X -> Y is assumed wherever the maximum of the delay matrices over
    the first axis is positive; the link's delay is that maximum.

    Args:
        time_series: array accepted by Data() with dim_order='psr'
            (process x sample x replication).
        delay_matrices: 3D array (delay x nodes x nodes) of link indicators.
        node_dynamics_model: either 'boolean_random' (discrete estimator,
            no normalisation) or 'AR_gaussian_discrete' (Gaussian
            estimator, normalised data).
        history_target: target embedding length (int).
        history_source: source embedding length (int).

    Returns:
        (nodes_n, nodes_n) float array of bTE estimates; NaN where no link
        exists or on the diagonal.

    Raises:
        ValueError: if node_dynamics_model is not recognised.
    """
    nodes_n = np.shape(delay_matrices)[1]
    # Collapse the delay axis: a link exists iff any delay entry is positive.
    delay_flattened = delay_matrices.max(axis=0)
    # Only normalise continuous Gaussian data; leave boolean data untouched.
    if node_dynamics_model == 'boolean_random':
        normalise = False
    elif node_dynamics_model == 'AR_gaussian_discrete':
        normalise = True
    else:
        # Fail fast: the original fell through and hit a NameError on
        # 'normalise' for any other model string.
        raise ValueError(
            'Unknown node_dynamics_model: {0}'.format(node_dynamics_model))
    dat = Data(time_series, dim_order='psr', normalise=normalise)
    data = dat.data
    # Compute empirical bTE between all pairs.
    settings = {
        'history_target': history_target,
        'history_source': history_source}
    # np.nan (not the removed np.NaN alias) keeps NumPy 2.0 compatibility.
    bTE_empirical_matrix = np.full((nodes_n, nodes_n), np.nan)
    for X in range(nodes_n):
        for Y in range(nodes_n):
            if (delay_flattened[X, Y] > 0) and (X != Y):
                settings['source_target_delay'] = int(delay_flattened[X, Y])
                if node_dynamics_model == 'boolean_random':
                    estimator = JidtDiscreteTE(settings)
                else:  # 'AR_gaussian_discrete' (validated above)
                    estimator = JidtGaussianTE(settings)
                bTE_empirical_matrix[X, Y] = estimator.estimate(
                    data[X, :, 0], data[Y, :, 0])
    return bTE_empirical_matrix
def test_lagged_mi():
    """Test estimation of lagged MI."""
    n = 10000
    cov = 0.4
    # Source is white Gaussian noise; target follows it one sample later.
    source = [rn.normalvariate(0, 1) for r in range(n)]
    noise = [rn.normalvariate(0, 1) for r in range(n - 1)]
    target = [0] + [sum(pair) for pair in zip(
        [cov * y for y in source[0:n - 1]],
        [(1 - cov) * y for y in noise])]
    source = np.array(source)
    target = np.array(target)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history': 1,
        'history_target': 1,
        'lag_mi': 1,
        'source_target_delay': 1}
    # TE with a one-sample source-target delay should match the lagged MI.
    te_k = JidtKraskovTE(settings).estimate(source, target)
    te_d = JidtDiscreteTE(settings).estimate(source, target)
    mi_d = JidtDiscreteMI(settings).estimate(source, target)
    mi_k = JidtKraskovMI(settings).estimate(source, target)
    mi_g = JidtGaussianMI(settings).estimate(source, target)
    _compare_result(mi_d, te_d, 'JidtDiscreteMI', 'JidtDiscreteTE',
                    'lagged MI', tol=0.05)
    _compare_result(mi_k, te_k, 'JidtKraskovMI', 'JidtKraskovTE',
                    'lagged MI', tol=0.05)
    _compare_result(mi_g, te_k, 'JidtGaussianMI', 'JidtKraskovTE',
                    'lagged MI', tol=0.05)
def test_invalid_history_parameters():
    """Ensure invalid history parameters raise a RuntimeError."""
    te_estimators = (JidtDiscreteTE, JidtGaussianTE, JidtKraskovTE)

    def _expect_assertion_errors(estimators, settings):
        # Every constructor must reject the non-integer parameter.
        for estimator_class in estimators:
            with pytest.raises(AssertionError):
                estimator_class(settings=settings)

    # TE: Parameters are not integers.
    _expect_assertion_errors(te_estimators, {
        'history_target': 4, 'history_source': 4,
        'tau_source': 2, 'tau_target': 2.5})
    _expect_assertion_errors(te_estimators, {
        'history_target': 4, 'history_source': 4,
        'tau_source': 2.5, 'tau_target': 2})
    _expect_assertion_errors(te_estimators, {
        'history_target': 4, 'history_source': 2.5,
        'tau_source': 2, 'tau_target': 2})
    _expect_assertion_errors(te_estimators, {
        'history_target': 2.5, 'history_source': 4,
        'tau_source': 2, 'tau_target': 2})
    # AIS: Parameters are not integers.
    _expect_assertion_errors((JidtGaussianAIS, JidtKraskovAIS),
                             {'history': 4, 'tau': 2.5})
    _expect_assertion_errors(
        (JidtDiscreteAIS, JidtGaussianAIS, JidtKraskovAIS),
        {'history': 4.5, 'tau': 2})
def test_one_two_dim_input_discrete():
    """Test one- and two-dimensional input for discrete estimators."""
    expected_mi, src_one, s, target_one = _get_gauss_data(
        expand=False, seed=SEED)
    src_two = np.expand_dims(src_one, axis=1)
    target_two = np.expand_dims(target_one, axis=1)
    ar_src_one, s = _get_ar_data(expand=False, seed=SEED)
    ar_src_two = np.expand_dims(ar_src_one, axis=1)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history_target': 1,
        'history': 2}
    # MI: one- and two-dimensional inputs must agree.
    est = JidtDiscreteMI(settings=settings)
    res_one = est.estimate(src_one, target_one)
    _assert_result(res_one, expected_mi, 'JidtDiscreteMI', 'MI',
                   0.08)  # More variability here
    res_two = est.estimate(src_two, target_two)
    _assert_result(res_two, expected_mi, 'JidtDiscreteMI', 'MI',
                   0.08)  # More variability here
    _compare_result(res_one, res_two, 'JidtDiscreteMI one dim',
                    'JidtDiscreteMI two dim', 'MI')
    # CMI (no conditional given, so this reduces to MI).
    est = JidtDiscreteCMI(settings=settings)
    res_one = est.estimate(src_one, target_one)
    _assert_result(res_one, expected_mi, 'JidtDiscreteCMI', 'CMI',
                   0.08)  # More variability here
    res_two = est.estimate(src_two, target_two)
    _assert_result(res_two, expected_mi, 'JidtDiscreteCMI', 'CMI',
                   0.08)  # More variability here
    _compare_result(res_one, res_two, 'JidtDiscreteMI one dim',
                    'JidtDiscreteMI two dim', 'CMI')
    # TE
    est = JidtDiscreteTE(settings=settings)
    # NOTE(review): both TE estimates below use the one-dim input; the
    # two-dim variant is never passed to the TE estimator -- confirm intent.
    res_one = est.estimate(src_one[1:], target_one[:-1])
    _assert_result(res_one, expected_mi, 'JidtDiscreteTE', 'TE',
                   0.08)  # More variability here
    res_two = est.estimate(src_one[1:], target_one[:-1])
    _assert_result(res_two, expected_mi, 'JidtDiscreteTE', 'TE',
                   0.08)  # More variability here
    _compare_result(res_one, res_two, 'JidtDiscreteMI one dim',
                    'JidtDiscreteMI two dim', 'TE')
    # AIS
    est = JidtDiscreteAIS(settings=settings)
    res_one = est.estimate(ar_src_one)
    res_two = est.estimate(ar_src_two)
    _compare_result(res_one, res_two, 'JidtDiscreteAIS one dim',
                    'JidtDiscreteAIS two dim', 'AIS (AR process)')
def test_lagged_mi():
    """Test estimation of lagged MI."""
    n = 10000
    cov = 0.4
    # NOTE(review): a function of the same name is defined earlier in this
    # file; this later definition shadows it at import time -- confirm.
    source = [rn.normalvariate(0, 1) for r in range(n)]
    scaled_src = [cov * y for y in source[0:n - 1]]
    scaled_noise = [(1 - cov) * y
                    for y in [rn.normalvariate(0, 1) for r in range(n - 1)]]
    target = [0] + [sum(pair) for pair in zip(scaled_src, scaled_noise)]
    source = np.array(source)
    target = np.array(target)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history': 1,
        'history_target': 1,
        'lag_mi': 1,
        'source_target_delay': 1}
    # Run all five estimators in the same order as before, collecting
    # results into a dict keyed by a short label.
    estimates = {}
    for key, estimator_class in (('te_k', JidtKraskovTE),
                                 ('te_d', JidtDiscreteTE),
                                 ('mi_d', JidtDiscreteMI),
                                 ('mi_k', JidtKraskovMI),
                                 ('mi_g', JidtGaussianMI)):
        estimates[key] = estimator_class(settings).estimate(source, target)
    _compare_result(estimates['mi_d'], estimates['te_d'], 'JidtDiscreteMI',
                    'JidtDiscreteTE', 'lagged MI', tol=0.05)
    _compare_result(estimates['mi_k'], estimates['te_k'], 'JidtKraskovMI',
                    'JidtKraskovTE', 'lagged MI', tol=0.05)
    _compare_result(estimates['mi_g'], estimates['te_k'], 'JidtGaussianMI',
                    'JidtKraskovTE', 'lagged MI', tol=0.05)
def test_one_two_dim_input_discrete():
    """Test one- and two-dimensional input for discrete estimators."""
    expected_mi, src_one, s, target_one = _get_gauss_data(expand=False)
    src_two = np.expand_dims(src_one, axis=1)
    target_two = np.expand_dims(target_one, axis=1)
    ar_src_one, s = _get_ar_data(expand=False)
    ar_src_two = np.expand_dims(ar_src_one, axis=1)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history_target': 1,
        'history': 2}
    # MI and CMI share the exact same check structure; loop over both.
    for estimator_class, name, measure in (
            (JidtDiscreteMI, 'JidtDiscreteMI', 'MI'),
            (JidtDiscreteCMI, 'JidtDiscreteCMI', 'CMI')):
        est = estimator_class(settings=settings)
        res_one = est.estimate(src_one, target_one)
        _assert_result(res_one, expected_mi, name, measure)
        res_two = est.estimate(src_two, target_two)
        _assert_result(res_two, expected_mi, name, measure)
        _compare_result(res_one, res_two, 'JidtDiscreteMI one dim',
                        'JidtDiscreteMI two dim', measure)
    # TE
    te_est = JidtDiscreteTE(settings=settings)
    # NOTE(review): both TE estimates use the one-dim input; the two-dim
    # variant is never passed here -- confirm intent.
    res_one = te_est.estimate(src_one[1:], target_one[:-1])
    _assert_result(res_one, expected_mi, 'JidtDiscreteTE', 'TE')
    res_two = te_est.estimate(src_one[1:], target_one[:-1])
    _assert_result(res_two, expected_mi, 'JidtDiscreteTE', 'TE')
    _compare_result(res_one, res_two, 'JidtDiscreteMI one dim',
                    'JidtDiscreteMI two dim', 'TE')
    # AIS
    ais_est = JidtDiscreteAIS(settings=settings)
    res_one = ais_est.estimate(ar_src_one)
    res_two = ais_est.estimate(ar_src_two)
    _compare_result(res_one, res_two, 'JidtDiscreteAIS one dim',
                    'JidtDiscreteAIS two dim', 'AIS (AR process)')
def test_te_gauss_data():
    """Test TE estimation on two sets of Gaussian random data.

    The first test is on correlated variables, the second on uncorrelated
    variables. Note that the calculation is based on a random variable
    (because the generated data is a set of random variables) - the result
    will be of the order of what we expect, but not exactly equal to it in
    fact, there will be a large variance around it.
    """
    expected_mi, source1, source2, target = _get_gauss_data(
        expand=False, seed=SEED)
    # Shift sources against the target to create a one-sample delay.
    source1 = source1[1:]
    source2 = source2[1:]
    target = target[:-1]
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history_target': 1}
    # Kraskov and Gaussian estimators use the default tolerance.
    for estimator_class, name in ((JidtKraskovTE, 'JidtKraskovTE'),
                                  (JidtGaussianTE, 'JidtGaussianTE')):
        est = estimator_class(settings=settings)
        te_cor = est.estimate(source1, target)
        te_uncor = est.estimate(source2, target)
        _assert_result(te_cor, expected_mi, name, 'TE (corr.)')
        _assert_result(te_uncor, 0, name, 'TE (uncorr.)')
    # The discrete estimator shows more variability, hence the wider 0.08
    # tolerance.
    est = JidtDiscreteTE(settings=settings)
    te_cor = est.estimate(source1, target)
    te_uncor = est.estimate(source2, target)
    _assert_result(te_cor, expected_mi, 'JidtDiscreteTE', 'TE (corr.)', 0.08)
    _assert_result(te_uncor, 0, 'JidtDiscreteTE', 'TE (uncorr.)', 0.08)
def test_invalid_settings_input():
    """Test handling of wrong inputs for settings dictionary."""
    # Wrong input type for settings dict.
    for estimator_class in (
            JidtDiscreteMI, JidtDiscreteCMI, JidtDiscreteAIS, JidtDiscreteTE,
            JidtGaussianMI, JidtGaussianCMI, JidtGaussianAIS, JidtGaussianTE,
            JidtKraskovMI, JidtKraskovCMI, JidtKraskovAIS, JidtKraskovTE):
        with pytest.raises(TypeError):
            estimator_class(settings=1)
    # Test if settings dict is initialised correctly.
    for estimator_class in (
            JidtDiscreteMI, JidtDiscreteCMI,
            JidtGaussianMI, JidtGaussianCMI,
            JidtKraskovMI, JidtKraskovCMI):
        e = estimator_class()
        assert type(e.settings) is dict, (
            'Did not initialise settings as dictionary.')
    # History parameter missing for AIS and TE estimation.
    for estimator_class in (
            JidtDiscreteAIS, JidtDiscreteTE,
            JidtGaussianAIS, JidtGaussianTE,
            JidtKraskovAIS, JidtKraskovTE):
        with pytest.raises(RuntimeError):
            estimator_class(settings={})
def test_local_values():
    """Test estimation of local values and their return type."""
    expected_mi, source, s, target = _get_gauss_data(expand=False)
    ar_proc, s = _get_ar_data(expand=False)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history_target': 1,
        'history': 2,
        'local_values': True}
    arr_msg = 'Local values are not a numpy array.'
    # MI - Discrete (discrete estimators show more variability: tol 0.08).
    est = JidtDiscreteMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtDiscreteMI', 'MI',
                   0.08)
    assert type(local_vals) is np.ndarray, arr_msg
    # MI - Gaussian
    est = JidtGaussianMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtGaussianMI', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # MI - Kraskov
    est = JidtKraskovMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtKraskovMI', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # CMI - Discrete
    est = JidtDiscreteCMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtDiscreteCMI',
                   'CMI', 0.08)
    assert type(local_vals) is np.ndarray, arr_msg
    # CMI - Gaussian
    est = JidtGaussianCMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtGaussianCMI', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # CMI - Kraskov
    est = JidtKraskovCMI(settings=settings)
    local_vals = est.estimate(source, target)
    _assert_result(np.mean(local_vals), expected_mi, 'JidtKraskovCMI', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # TE - Discrete (source shifted one sample against the target).
    est = JidtDiscreteTE(settings=settings)
    local_vals = est.estimate(source[1:], target[:-1])
    _assert_result(np.mean(local_vals), expected_mi, 'JidtDiscreteTE', 'TE',
                   0.08)
    assert type(local_vals) is np.ndarray, arr_msg
    # TE - Gaussian
    est = JidtGaussianTE(settings=settings)
    local_vals = est.estimate(source[1:], target[:-1])
    _assert_result(np.mean(local_vals), expected_mi, 'JidtGaussianTE', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # TE - Kraskov
    est = JidtKraskovTE(settings=settings)
    local_vals = est.estimate(source[1:], target[:-1])
    _assert_result(np.mean(local_vals), expected_mi, 'JidtKraskovTE', 'MI')
    assert type(local_vals) is np.ndarray, arr_msg
    # AIS - Kraskov
    est = JidtKraskovAIS(settings=settings)
    local_ais_k = est.estimate(ar_proc)
    assert type(local_ais_k) is np.ndarray, arr_msg
    # AIS - Discrete
    est = JidtDiscreteAIS(settings=settings)
    local_ais_d = est.estimate(ar_proc)
    assert type(local_ais_d) is np.ndarray, arr_msg
    # TODO should we compare these?
    # _compare_result(np.mean(local_ais_k), np.mean(local_ais_d),
    #                 'JidtKraskovAIS', 'JidtDiscreteAIS',
    #                 'AIS (AR process)')
    # AIS - Gaussian
    est = JidtGaussianAIS(settings=settings)
    local_ais_g = est.estimate(ar_proc)
    assert type(local_ais_g) is np.ndarray, arr_msg
    _compare_result(np.mean(local_ais_k), np.mean(local_ais_g),
                    'JidtKraskovAIS', 'JidtGaussianAIS', 'AIS (AR process)')
def test_local_values():
    """Test estimation of local values and their return type."""
    expected_mi, source, s, target = _get_gauss_data(expand=False)
    ar_proc, s = _get_ar_data(expand=False)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history_target': 1,
        'history': 2,
        'local_values': True}
    arr_msg = 'Local values are not a numpy array.'
    # MI and CMI estimators: same data, same checks; the measure label
    # mirrors the original call sites verbatim.
    for estimator_class, name, measure in (
            (JidtDiscreteMI, 'JidtDiscreteMI', 'MI'),
            (JidtGaussianMI, 'JidtGaussianMI', 'MI'),
            (JidtKraskovMI, 'JidtKraskovMI', 'MI'),
            (JidtDiscreteCMI, 'JidtDiscreteCMI', 'CMI'),
            (JidtGaussianCMI, 'JidtGaussianCMI', 'MI'),
            (JidtKraskovCMI, 'JidtKraskovCMI', 'MI')):
        est = estimator_class(settings=settings)
        local_vals = est.estimate(source, target)
        _assert_result(np.mean(local_vals), expected_mi, name, measure)
        assert type(local_vals) is np.ndarray, arr_msg
    # TE estimators: source shifted one sample against the target.
    for estimator_class, name, measure in (
            (JidtDiscreteTE, 'JidtDiscreteTE', 'TE'),
            (JidtGaussianTE, 'JidtGaussianTE', 'MI'),
            (JidtKraskovTE, 'JidtKraskovTE', 'MI')):
        est = estimator_class(settings=settings)
        local_vals = est.estimate(source[1:], target[:-1])
        _assert_result(np.mean(local_vals), expected_mi, name, measure)
        assert type(local_vals) is np.ndarray, arr_msg
    # AIS - Kraskov
    est = JidtKraskovAIS(settings=settings)
    local_ais_k = est.estimate(ar_proc)
    assert type(local_ais_k) is np.ndarray, arr_msg
    # AIS - Discrete
    est = JidtDiscreteAIS(settings=settings)
    local_ais_d = est.estimate(ar_proc)
    assert type(local_ais_d) is np.ndarray, arr_msg
    # TODO should we compare these?
    # _compare_result(np.mean(local_ais_k), np.mean(local_ais_d),
    #                 'JidtKraskovAIS', 'JidtDiscreteAIS',
    #                 'AIS (AR process)')
    # AIS - Gaussian
    est = JidtGaussianAIS(settings=settings)
    local_ais_g = est.estimate(ar_proc)
    assert type(local_ais_g) is np.ndarray, arr_msg
    _compare_result(np.mean(local_ais_k), np.mean(local_ais_g),
                    'JidtKraskovAIS', 'JidtGaussianAIS', 'AIS (AR process)')
# and call JIDT Discrete estimators using the build-in discretization. n = 1000 covariance = 0.4 corr_expected = covariance / (1 * np.sqrt(covariance**2 + (1 - covariance)**2)) expected_mi = calculate_mi(corr_expected) source_cor = np.random.normal(0, 1, size=n) # correlated src source_uncor = np.random.normal(0, 1, size=n) # uncorrelated src target = (covariance * source_cor + (1 - covariance) * np.random.normal(0, 1, size=n)) settings = {'discretise_method': 'equal', 'n_discrete_bins': 5} est = JidtDiscreteCMI(settings) cmi = est.estimate(source_cor, target, source_uncor) print('Estimated CMI: {0:.5f}, expected CMI: {1:.5f}'.format(cmi, expected_mi)) settings['history_target'] = 1 est = JidtDiscreteTE(settings) te = est.estimate(source_cor[1:n], target[0:n - 1]) print('Estimated TE: {0:.5f}, expected TE: {1:.5f}'.format(te, expected_mi)) # 2 Use network inference algorithms on discrete data. n_procs = 5 alphabet_size = 5 data = Data(np.random.randint(0, alphabet_size, size=(n, n_procs)), dim_order='sp', normalise=False) # don't normalize discrete data # Initialise analysis object and define settings network_analysis = MultivariateTE() settings = { 'cmi_estimator': 'JidtDiscreteCMI', 'alph1': alphabet_size, # provide initial alphabet size for
def compute_bTE_all_pairs(traj):
    """Simulate network dynamics and compute bTE on all adjacent pairs.

    Generates a network from the trajectory's topology parameters, runs the
    configured node dynamics, estimates bivariate TE (source-target delay of
    one sample) on every directed link of the adjacency matrix, and stores
    all intermediate matrices plus the bTE result in the trajectory.

    Args:
        traj: pypet-style trajectory carrying topology, node-coupling,
            delay, node-dynamics and estimation parameters.

    Raises:
        ValueError: if traj.par.node_dynamics.model is not recognised.
    """
    nodes_n = traj.par.topology.initial.nodes_n
    # Generate initial network
    G = network_dynamics.generate_network(traj.par.topology.initial)
    # Get adjacency matrix
    adjacency_matrix = np.array(nx.to_numpy_matrix(
        G, nodelist=np.array(range(0, nodes_n)), dtype=int))
    # Add self-loops
    np.fill_diagonal(adjacency_matrix, 1)
    # Generate initial node coupling
    coupling_matrix = network_dynamics.generate_coupling(
        traj.par.node_coupling.initial, adjacency_matrix)
    # Generate delay
    delay_matrices = network_dynamics.generate_delay(
        traj.par.delay.initial, adjacency_matrix)
    # Generate coefficient matrices
    coefficient_matrices = np.transpose(
        delay_matrices * coupling_matrix, (0, 2, 1))
    # Run dynamics
    time_series = network_dynamics.run_dynamics(
        traj.par.node_dynamics, coefficient_matrices)
    # Load time series (only normalise continuous Gaussian data).
    model = traj.par.node_dynamics.model
    if model == 'boolean_random':
        normalise = False
    elif model == 'AR_gaussian_discrete':
        normalise = True
    else:
        # Fail fast: the original fell through and hit a NameError on
        # 'normalise' for any other model string.
        raise ValueError('Unknown node dynamics model: {0}'.format(model))
    dat = Data(time_series, dim_order='psr', normalise=normalise)
    data = dat.data
    # Compute empirical bTE between all pairs (fixed delay of one sample).
    settings = {
        'source_target_delay': 1,
        'history_source': traj.par.estimation.history_source,
        'history_target': traj.par.estimation.history_target}
    if model == 'boolean_random':
        est = JidtDiscreteTE(settings)
    else:  # 'AR_gaussian_discrete' (validated above)
        est = JidtGaussianTE(settings)
    # np.nan (not the removed np.NaN alias) keeps NumPy 2.0 compatibility.
    bTE_empirical_matrix = np.full((nodes_n, nodes_n), np.nan)
    for X in range(nodes_n):
        for Y in range(nodes_n):
            if (adjacency_matrix[X, Y] > 0) and (X != Y):
                bTE_empirical_matrix[X, Y] = est.estimate(
                    data[X, :, 0], data[Y, :, 0])
    # Add results to the trajectory
    # The wildcard character $ will be replaced by the name of the current
    # run, formatted as `run_XXXXXXXX`
    traj.f_add_result(
        '$.topology.initial',
        adjacency_matrix=adjacency_matrix,
        comment='')
    traj.f_add_result(
        '$.node_coupling.initial',
        coupling_matrix=coupling_matrix,
        coefficient_matrices=coefficient_matrices,
        comment='')
    traj.f_add_result(
        '$.delay.initial',
        delay_matrices=delay_matrices,
        comment='')
    # traj.f_add_result(
    #     '$.node_dynamics',
    #     time_series=time_series,
    #     comment='')
    traj.f_add_result(
        PickleResult,
        '$.bTE',
        bTE_empirical_matrix=bTE_empirical_matrix,
        comment='')
    # Trigger JVM garbage collection to release JIDT estimator memory.
    jSystem = jpype.JPackage("java.lang").System
    jSystem.gc()