# Build per-condition bout transition matrices, smooth them over the isomap
# embedding, and save per-fish and pooled versions to disk.
isomap = np.load(
    os.path.join(experiment.parent.subdirs['analysis'], 'isomap.npy'))[:, :3]

# Create paths for saving
transition_directory = create_folder(experiment.subdirs['analysis'],
                                     'transitions')

# The smoothing weights depend only on the embedding, so compute them once
# here. (The original computed a default-bandwidth W that was dead code —
# always overwritten by the bandwidth=40 version inside the loop — and
# recomputed the bandwidth=40 weights on every iteration.)
W = generate_weights(isomap, bandwidth=40.)

for condition, fish_info in experiment.data.groupby('condition'):
    print(condition)
    condition_directory = create_folder(transition_directory, condition)
    # Restrict bouts to the fish that belong to this condition.
    condition_bouts = mapped_bouts[mapped_bouts['ID'].isin(fish_info['ID'])]
    # Compute the transition matrix for each fish
    T = fish_transition_matrices(condition_bouts,
                                 state_col='exemplar',
                                 n_states=len(isomap),
                                 shuffle=False)
    np.save(os.path.join(condition_directory, 'transition_matrices.npy'), T)
    print(T.shape)
    # Redistribute (smooth) transitions with the embedding weights.
    WTW = redistribute_transitions(T, W)
    np.save(
        os.path.join(condition_directory, 'smoothed_transition_matrices.npy'),
        WTW)
    # Sum transitions over all fish, then smooth the pooled matrix.
    T_all = T.sum(axis=0)
    WTW_all = redistribute_transitions(T_all, W)
    np.save(os.path.join(condition_directory, 'T.npy'), T_all)
    np.save(os.path.join(condition_directory, 'WTW.npy'), WTW_all)
from scipy import stats as ss
import os

# Output directory for the modelling results.
modelling_directory = os.path.join(experiment.subdirs['analysis'], 'modelling')

if __name__ == "__main__":
    # Bout data, one row per transition; IDs kept as strings.
    mapped_bouts = pd.read_csv(os.path.join(experiment.subdirs['analysis'],
                                            'mapped_bouts.csv'),
                               index_col='transition_index',
                               dtype={'ID': str, 'video_code': str})
    n_clusters = len(mapped_bouts['module'].unique())

    # Null model: 1000 shuffled transition matrices, each summed over fish.
    S = []
    for shuffle in range(1000):  # plain range — the index is just a counter
        if shuffle % 10 == 0:
            print(shuffle)
        shuffled = fish_transition_matrices(mapped_bouts,
                                            state_col='module',
                                            shuffle=True,
                                            verbose=False)
        S.append(shuffled.sum(axis=0))
    S = np.array(S)

    # Observed transition matrix, summed over fish.
    T = fish_transition_matrices(mapped_bouts, state_col='module',
                                 shuffle=False)
    T = T.sum(axis=0)

    # Element-wise normal null fitted from the shuffles. scipy's frozen norm
    # broadcasts array loc/scale, so a single vectorized cdf call over the
    # (n_clusters, n_clusters) grid replaces the original double loop.
    model_params = np.array([S.mean(axis=0), S.std(axis=0)])
    p_values_decrease = ss.norm(loc=model_params[0],
                                scale=model_params[1]).cdf(T)
    p_values_increase = 1 - p_values_decrease
if __name__ == "__main__":
    # Import data: mapped bouts (IDs as strings) and the first three
    # components of the isomap embedding.
    analysis_dir = experiment.subdirs['analysis']
    bouts = pd.read_csv(os.path.join(analysis_dir, 'mapped_bouts.csv'),
                        index_col='transition_index',
                        dtype={'ID': str, 'video_code': str})
    isomap = np.load(os.path.join(analysis_dir, 'isomap.npy'))[:, :3]

    # One transition matrix per fish.
    per_fish = fish_transition_matrices(bouts)
    np.save(os.path.join(transition_directory, 'transition_matrices.npy'),
            per_fish)
    print(per_fish.shape)

    # Smooth each per-fish matrix with weights derived from the embedding.
    weights = generate_weights(isomap, bandwidth=40.)
    smoothed = redistribute_transitions(per_fish, weights)
    np.save(
        os.path.join(transition_directory, 'smoothed_transition_matrices.npy'),
        smoothed)

    # Pool the matrices over all fish, then smooth the pooled matrix.
    pooled = per_fish.sum(axis=0)
    pooled_smoothed = redistribute_transitions(pooled, weights)
    np.save(os.path.join(transition_directory, 'T.npy'), pooled)
    np.save(os.path.join(transition_directory, 'WTW.npy'), pooled_smoothed)
# NOTE(review): this chunk begins mid-statement — the first line below closes
# a read_csv(...) dtype dict whose opening lies outside this view.
'video_code': str})
# First three components of the isomap embedding; pairwise distances between
# these points drive the smoothing weights.
isomap = np.load(os.path.join(experiment.subdirs['analysis'],
                              'isomap.npy'))[:, :3]
weights = generate_weights(isomap)
# Number of distinct fish in the bout table (one transition matrix each).
n_fish = len(bouts['ID'].unique())
timer = Timer()
timer.start()
# Compute the average of 100 shuffled transition matrices for each fish
print 'Generating shuffled matrices...',
n_shuffles = 100
# Accumulate per-fish shuffled matrices in place, then average over shuffles.
S = np.zeros((n_fish, len(isomap), len(isomap)))
for shuffle in range(n_shuffles):
    S += fish_transition_matrices(bouts, shuffle=True, verbose=False)
S /= n_shuffles
# Smooth the averaged shuffled matrices with the embedding weights.
S = redistribute_transitions(S, weights)
np.save(
    os.path.join(transition_directory, 'shuffled_transition_matrices.npy'),
    S)
print timer.convert_time(timer.lap())
# Reload the shuffled null matrices and the smoothed observed matrices.
S = np.load(
    os.path.join(transition_directory, 'shuffled_transition_matrices.npy'))
T = np.load(
    os.path.join(transition_directory, 'smoothed_transition_matrices.npy'))
# Generate permuted matrices
print 'Generating permutations...',
# NOTE(review): n_permutations is not defined in this chunk — presumably a
# module-level constant defined elsewhere; confirm before running in isolation.
S_permuted = np.empty((n_permutations, S.shape[1], S.shape[2]))