def test_neural_entropy_uniform(num_experiments, scramble=True):
    # Fill bins uniformly, one data point per bin; note that with
    # num_data_points = 0.2 * BINS**2 only the first 20% of the grid is filled.
    transfer_entropy = []
    norm_entropy = []
    data_per_bin = 1
    num_data_points = int(.2 * BINS * BINS)
    print('num_data_points:{}'.format(num_data_points))
    brain_output = np.zeros((num_data_points, 2))
    row = 0
    for i in range(BINS):
        for j in range(BINS):
            for _ in range(data_per_bin):
                if row >= num_data_points:
                    break
                # the divisor assumes BINS == 100, mapping bins into [0, 1)
                brain_output[row, :] = [i / 100 + 0.0001, j / 100 + 0.0001]
                row += 1
    if scramble:
        rs = RandomState(1)
        for _ in range(num_experiments):
            rs.shuffle(brain_output)
            norm_entropy.append(get_shannon_entropy_2d(brain_output))
            transfer_entropy.append(get_transfer_entropy(brain_output))
    else:
        for _ in range(num_experiments):
            norm_entropy.append(get_shannon_entropy_2d(brain_output))
            transfer_entropy.append(get_transfer_entropy(brain_output))
    print("Transfer Entropy on uniform bin data: {}".format(transfer_entropy))
    print("Shannon Entropy on uniform bin data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy
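
# get_shannon_entropy_2d and get_transfer_entropy are imported from the
# project's analysis module and are not defined in this file. For reference,
# a normalized 2D Shannon entropy over a BINS x BINS histogram could look like
# the sketch below; the binning range, log base, and normalization are
# assumptions, and the helper name is hypothetical.
def _shannon_entropy_2d_sketch(data, min_v=0., max_v=1., bins=100):
    hist, _, _ = np.histogram2d(
        data[:, 0], data[:, 1],
        bins=bins, range=[[min_v, max_v], [min_v, max_v]])
    p = hist / hist.sum()           # joint probability mass per bin
    p = p[p > 0]                    # drop empty bins (0 * log 0 := 0)
    h = -(p * np.log(p)).sum()      # Shannon entropy in nats
    return h / np.log(bins * bins)  # normalize by the maximum possible entropy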
def test_coupled_oscillators(num_experiments):
    from dyadic_interaction.dynamical_systems import spring_mass_system
    transfer_entropy = []
    norm_entropy = []
    rs = RandomState(0)
    for _ in range(num_experiments):
        spring_data = spring_mass_system(
            masses=rs.uniform(1.0, 10.0, 2),
            constants=rs.uniform(1.0, 50.0, 2),
            lengths=rs.uniform(0.1, 5.0, 2))
        # columns 0 and 2 are the two positions; 1 and 3 the two velocities
        pos = np.column_stack((spring_data[:, 0], spring_data[:, 2]))
        # transfer_entropy, local_te = get_transfer_entropy(pos, local=True)
        norm_entropy.append(
            get_shannon_entropy_2d(pos, min_v=pos.min(), max_v=pos.max()))
        transfer_entropy.append(
            get_transfer_entropy(pos, min_v=pos.min(), max_v=pos.max()))
    print("Transfer Entropy of spring positions: {}".format(transfer_entropy))
    print("Shannon Entropy of spring positions: {}".format(norm_entropy))
    # The same analysis can be run on the velocities:
    # vel = np.column_stack((spring_data[:, 1], spring_data[:, 3]))
    # transfer_entropy = get_transfer_entropy(vel, log=True)
    # norm_entropy = get_shannon_entropy_2d(vel)
    return norm_entropy, transfer_entropy
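
# spring_mass_system lives in dyadic_interaction.dynamical_systems and is not
# shown here. A plausible minimal version, for reference only: two masses
# chained to a wall by two springs (wall -- k1 -- m1 -- k2 -- m2), returning
# columns [x1, v1, x2, v2] to match the indexing above. The layout, initial
# conditions, and integration settings are all assumptions.
def _spring_mass_system_sketch(masses, constants, lengths,
                               duration=50., steps=5000):
    from scipy.integrate import odeint
    m1, m2 = masses
    k1, k2 = constants
    l1, l2 = lengths

    def deriv(state, _t):
        x1, v1, x2, v2 = state
        a1 = (-k1 * (x1 - l1) + k2 * (x2 - x1 - l2)) / m1
        a2 = -k2 * (x2 - x1 - l2) / m2
        return [v1, a1, v2, a2]

    t = np.linspace(0., duration, steps)
    init = [l1 + 0.5, 0., l1 + l2 + 0.5, 0.]  # slightly stretched, at rest
    return odeint(deriv, init, t)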
def analyze_sample_brain():
    import json
    with open('dyadic_interaction/tmp_brains.json') as f:
        data = json.load(f)
    df = np.array(data)
    t1_a1 = df[0][0]
    t1_a2 = df[0][1]
    te1 = get_transfer_entropy(t1_a1, log=True)
    te2 = get_transfer_entropy(t1_a2, log=True)
    print('TE agent1: {}'.format(te1))
    print('TE agent2: {}'.format(te2))
    fig = plt.figure()
    ax = fig.add_subplot(2, 2, 1)
    ax.plot(t1_a1[150:, 0])
    ax = fig.add_subplot(2, 2, 2)
    ax.plot(t1_a1[150:, 1])
    ax = fig.add_subplot(2, 2, 3)
    ax.plot(t1_a2[150:, 0])
    ax = fig.add_subplot(2, 2, 4)
    ax.plot(t1_a2[150:, 1])
    plt.show()
def test_neural_entropy_random(num_experiments, num_data_points,
                               distribution='uniform'):
    """
    Simulate uncorrelated random arrays.
    :param num_experiments: how many simulations to run
    :param num_data_points: how many data points per time series
    :param distribution: normal or uniform
    """
    transfer_entropy = []
    norm_entropy = []
    rs = RandomState(0)
    if distribution == 'normal':
        brain_output = rs.normal(0, 1, (num_experiments, num_data_points, 2))
        for i in range(num_experiments):
            norm_entropy.append(
                get_shannon_entropy_2d(brain_output[i, :, :],
                                       min_v=-3., max_v=3.))
            transfer_entropy.append(
                get_transfer_entropy(brain_output[i, :, :],
                                     min_v=-3., max_v=3.))
    else:
        brain_output = rs.rand(num_experiments, num_data_points, 2)
        for i in range(num_experiments):
            norm_entropy.append(get_shannon_entropy_2d(brain_output[i, :, :]))
            transfer_entropy.append(get_transfer_entropy(brain_output[i, :, :]))
    print("Simulated {} experiments of {} data points".format(
        num_experiments, num_data_points))
    print("Transfer Entropy on random {} data: {}".format(
        distribution, transfer_entropy))
    print("Shannon Entropy on random {} data: {}".format(
        distribution, norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy
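
# Illustrative expectation for the test above (not recorded output): with
# independent series, transfer entropy should sit near zero up to
# finite-sample bias, while the normalized Shannon entropy of uniform noise
# approaches 1 as num_data_points grows, e.g.:
# h, te = test_neural_entropy_random(num_experiments=10, num_data_points=2000)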
def test_neural_entropy_single(num_experiments, num_data_points):
    # Constant arrays of the same value (single bin)
    transfer_entropy = []
    norm_entropy = []
    brain_output = np.ones((num_data_points, 2))
    for _ in range(num_experiments):
        # RandomState(1) is re-seeded on every iteration, so each experiment
        # adds the identical noise pattern on top of the accumulating output.
        rs = RandomState(1)
        brain_output = add_noise(brain_output, rs, noise_level=1e-8)
        norm_entropy.append(get_shannon_entropy_2d(brain_output))
        transfer_entropy.append(get_transfer_entropy(brain_output))
    print("Transfer Entropy on 1D constant data: {}".format(transfer_entropy))
    print("Shannon Entropy on 1D constant data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy
def test_neural_entropy_constant(num_experiments, num_data_points):
    # Correlated and constant arrays
    transfer_entropy = []
    norm_entropy = []
    source = np.ones(num_data_points)
    destination = np.ones(num_data_points) / 2.
    brain_output = np.column_stack((source, destination))
    for _ in range(num_experiments):
        # rs does not "keep going" across experiments: RandomState(1) is
        # re-created each iteration, so the same noise pattern is added
        # every time (and accumulates on brain_output).
        rs = RandomState(1)
        brain_output = add_noise(brain_output, rs, noise_level=1e-8)
        norm_entropy.append(get_shannon_entropy_2d(brain_output))
        transfer_entropy.append(get_transfer_entropy(brain_output))
    print("Transfer Entropy on 2D constant data: {}".format(transfer_entropy))
    print("Shannon Entropy on 2D constant data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy
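
# add_noise is imported from the project's utils. The two constant-data tests
# above rely on it to spread otherwise identical values across more than one
# bin. A minimal sketch of the assumed behavior (additive Gaussian noise
# scaled by noise_level; the real helper may differ):
def _add_noise_sketch(data, rs, noise_level=1e-8):
    return data + noise_level * rs.normal(size=data.shape)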
def test_neural_entropy_correlated(num_experiments, num_data_points,
                                   cov=0.99, delay=1):
    # One series random, the other correlated with the first at some delay
    transfer_entropy = []
    norm_entropy = []
    # For destination = cov * source + (1 - cov) * noise (unit-variance terms),
    # this is the source-destination correlation; the expected TE of a jointly
    # Gaussian pair is then -0.5 * ln(1 - corr^2).
    corr_expected = cov / math.sqrt(cov**2 + (1 - cov)**2)
    entropy_expected = -0.5 * math.log(1 - corr_expected**2)
    rs = RandomState(0)
    for _ in range(num_experiments):
        brain_output = generate_correlated_data(num_data_points, cov, delay, rs)
        norm_entropy.append(
            get_shannon_entropy_2d(brain_output, min_v=-3., max_v=3.))
        transfer_entropy.append(
            get_transfer_entropy(brain_output, delay, log=True,
                                 min_v=-3., max_v=3.))
    # To inspect local transfer entropy instead:
    # transfer_entropy, local_te = get_transfer_entropy(brain_output, delay, local=True)
    print("Transfer Entropy on correlated data ({} data points, covariance {},"
          " delay {}): {}\nExpected TE: {}".format(
              num_data_points, cov, delay, transfer_entropy, entropy_expected))
    print("Shannon Entropy on correlated data: {}".format(norm_entropy))
    return norm_entropy, transfer_entropy
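
# generate_correlated_data is imported elsewhere. The corr_expected formula
# above is consistent with a generator of the form
#     destination[i] = cov * source[i - delay] + (1 - cov) * noise[i]
# with unit-variance Gaussian source and noise, giving
#     corr = cov / sqrt(cov**2 + (1 - cov)**2).
# A minimal sketch under those assumptions (the real generator may differ):
def _generate_correlated_data_sketch(num_data_points, cov, delay, rs):
    source = rs.normal(0, 1, num_data_points)
    destination = np.zeros(num_data_points)
    destination[delay:] = (cov * source[:num_data_points - delay]
                           + (1 - cov) * rs.normal(0, 1,
                                                   num_data_points - delay))
    return np.column_stack((source, destination))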
def compute_performance(self, t):
    performance_agent_AB = []
    if self.entropy_type == 'transfer':
        # only applies to neural outputs (with 2 neurons);
        # add random noise to the data before calculating transfer entropy
        for a in range(2):
            if self.ghost_index == a:
                continue
            if self.isolation and a == 1:
                continue
            if self.concatenate:
                all_values_for_computing_entropy = np.concatenate([
                    self.values_for_computing_entropy[t][a]
                    for t in range(self.num_trials)
                ])
            else:
                all_values_for_computing_entropy = \
                    self.values_for_computing_entropy[t][a]
            all_values_for_computing_entropy = utils.add_noise(
                all_values_for_computing_entropy,
                self.random_state,
                noise_level=self.data_noise_level
            )
            # calculate performance
            # TODO: understand what happens if reciprocal=False
            performance_agent_AB.append(
                get_transfer_entropy(all_values_for_computing_entropy,
                                     binning=True)
            )
    elif self.entropy_type in ['shannon-1d', 'shannon-dd']:
        if self.entropy_target_value == 'distance':
            if self.concatenate:
                all_values_for_computing_entropy = np.concatenate([
                    self.values_for_computing_entropy
                ])
            else:
                all_values_for_computing_entropy = \
                    self.values_for_computing_entropy[t]
            min_v, max_v = 0., 100.
            performance_agent_AB = [
                get_shannon_entropy_dd_simplified(
                    all_values_for_computing_entropy, min_v, max_v)
            ]
        elif self.entropy_target_value == 'angle':
            # angle (apply modulo angle of 2*pi)
            # min_v, max_v = 0., 2 * np.pi
            min_v, max_v = -np.pi / 4, np.pi / 4
            for a in range(2):
                if self.ghost_index == a:
                    continue
                if self.isolation and a == 1:
                    continue
                if self.concatenate:
                    all_values_for_computing_entropy = np.concatenate([
                        self.values_for_computing_entropy[t][a]
                        for t in range(self.num_trials)
                    ])
                else:
                    all_values_for_computing_entropy = \
                        self.values_for_computing_entropy[t][a]
                # all_values_for_computing_entropy = all_values_for_computing_entropy % 2*np.pi
                all_values_for_computing_entropy = \
                    all_values_for_computing_entropy.flatten()
                all_values_for_computing_entropy = \
                    np.diff(all_values_for_computing_entropy)
                performance_agent_AB.append(
                    get_shannon_entropy_1d(
                        all_values_for_computing_entropy, min_v, max_v)
                )
        else:
            # neural
            min_v, max_v = 0., 1.
            for a in range(2):
                if self.ghost_index == a:
                    continue
                if self.isolation and a == 1:
                    continue
                if self.concatenate:
                    all_values_for_computing_entropy = np.concatenate([
                        self.values_for_computing_entropy[t][a]
                        for t in range(self.num_trials)
                    ])
                else:
                    all_values_for_computing_entropy = \
                        self.values_for_computing_entropy[t][a]
                if self.entropy_type == 'shannon-dd':
                    performance_agent_AB.append(
                        get_shannon_entropy_dd_simplified(
                            all_values_for_computing_entropy, min_v, max_v)
                    )
                else:
                    # shannon-1d: one entropy value per neuron column
                    for c in range(self.num_brain_neurons):
                        column_values = all_values_for_computing_entropy[:, c]
                        performance_agent_AB.append(
                            get_shannon_entropy_1d(column_values, min_v, max_v)
                        )
    else:
        # sample entropy; only applies to 1d data
        if self.entropy_target_value == 'neural':
            for a in range(2):
                if self.ghost_index == a:
                    continue
                if self.isolation and a == 1:
                    continue
                if self.concatenate:
                    all_values_for_computing_entropy = np.concatenate([
                        self.values_for_computing_entropy[t][a]
                        for t in range(self.num_trials)
                    ])
                else:
                    all_values_for_computing_entropy = \
                        self.values_for_computing_entropy[t][a]
                for c in range(self.num_brain_neurons):
                    column_values = all_values_for_computing_entropy[:, c]
                    mean = column_values.mean()
                    std = column_values.std()
                    normalize_values = (column_values - mean) / std
                    performance_agent_AB.append(
                        _numba_sampen(
                            normalize_values, order=2,
                            r=(0.2 * DEFAULT_SAMPLE_ENTROPY_NEURAL_STD))
                    )
        elif self.entropy_target_value == 'distance':
            if self.concatenate:
                all_values_for_computing_entropy = np.concatenate([
                    self.values_for_computing_entropy
                ])
            else:
                all_values_for_computing_entropy = \
                    self.values_for_computing_entropy[t]
            mean = all_values_for_computing_entropy.mean()
            std = all_values_for_computing_entropy.std()
            normalize_values = (all_values_for_computing_entropy - mean) / std
            performance_agent_AB = [
                _numba_sampen(
                    normalize_values.flatten(), order=2,
                    r=(0.2 * DEFAULT_SAMPLE_ENTROPY_DISTANCE_STD))
            ]
        else:
            assert self.entropy_target_value == 'angle'
            for a in range(2):
                if self.ghost_index == a:
                    continue
                if self.isolation and a == 1:
                    continue
                if self.concatenate:
                    all_values_for_computing_entropy = np.concatenate([
                        self.values_for_computing_entropy[t][a]
                        for t in range(self.num_trials)
                    ])
                else:
                    all_values_for_computing_entropy = \
                        self.values_for_computing_entropy[t][a]
                all_values_for_computing_entropy = \
                    np.diff(all_values_for_computing_entropy)
                mean = all_values_for_computing_entropy.mean()
                std = all_values_for_computing_entropy.std()
                normalize_values = (all_values_for_computing_entropy - mean) / std
                performance_agent_AB.append(
                    _numba_sampen(
                        normalize_values.flatten(), order=2,
                        r=(0.2 * DEFAULT_SAMPLE_ENTROPY_ANGLE_STD))
                )
    return performance_agent_AB
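
# _numba_sampen is the project's compiled sample-entropy routine. For
# reference, a naive (and slow) SampEn with Chebyshev distance and
# self-matches excluded is sketched below; that _numba_sampen follows exactly
# this convention (embedding dimension `order`, tolerance `r`,
# SampEn = -ln(A/B)) is an assumption.
def _sampen_sketch(x, order=2, r=0.2):
    x = np.asarray(x, dtype=float)
    n = len(x)

    def count_matches(m):
        # pairs i < j whose length-m templates stay within r of each other
        templates = np.array([x[i:i + m] for i in range(n - order)])
        count = 0
        for i in range(len(templates) - 1):
            dist = np.max(np.abs(templates[i + 1:] - templates[i]), axis=1)
            count += int(np.count_nonzero(dist <= r))
        return count

    b = count_matches(order)      # B: template matches at length m
    a = count_matches(order + 1)  # A: template matches at length m + 1
    return -np.log(a / b) if a > 0 and b > 0 else np.inf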