import collections
import os
from itertools import chain

import pandas as pd
from sklearn.model_selection import train_test_split

# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
    new_ukbb, y, test_size=0.5, random_state=0)
# Keep only subjects with an age at the imaging visit (UKBB field 21003-2.0)
X_train = X_train.dropna(subset=['21003-2.0'])
X_test = X_test.dropna(subset=['21003-2.0'])

merged_data = pd.read_csv(path_to_merge_brain, usecols=columns)

# Collect the diffusion-MRI column names (FA, ICVF, ISOVF, L1-L3, MD, MO, OD)
# into one ordered dictionary, together with the subject id column 'eid'
dmriDict = collections.OrderedDict(
    chain(brain_dmri_fa.items(), brain_dmri_icvf.items(),
          brain_dmri_isovf.items(), brain_dmri_l1.items(),
          brain_dmri_l2.items(), brain_dmri_l3.items(),
          brain_dmri_md.items(), brain_dmri_mo.items(),
          brain_dmri_od.items()))
dmriDict.update({'eid': 'eid'})
dmri = pd.DataFrame(merged_data, columns=dmriDict.keys())
dmri = dmri.dropna()


def load_combine_data(X_split, merged_data, dmri):
    data_frame = []
    connectomes = []
    eids = []
    for e_id in X_split.eid:
        # Phenotype rows, connectome file path, and dMRI rows for this subject
        this_eid_data = merged_data[merged_data['eid'] == e_id]
        this_path = os.path.join(path_to_matrices,
                                 str(e_id) + '_20227_2_0.txt')
        this_dmri_data = dmri[dmri['eid'] == e_id]
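        # --- Hypothetical continuation (not in the original excerpt): the
        # function is truncated above. A minimal sketch of how the loop could
        # finish, assuming each *_20227_2_0.txt file holds one whitespace-
        # separated connectivity matrix and that numpy is imported as np:
        connectomes.append(np.loadtxt(this_path))
        data_frame.append(this_eid_data.merge(this_dmri_data, on='eid'))
        eids.append(e_id)
    return pd.concat(data_frame), connectomes, eids
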
# Same pipeline for a second target: drop the fluid-intelligence column
# (20016-2.0) and keep only the subject id and the neuroticism score
# (UKBB field 20127-0.0)
new_ukbb = new_ukbb.drop(columns=['20016-2.0'], errors='ignore')

# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
    new_ukbb, y, test_size=0.5, random_state=0)
X_train = X_train[['eid', '20127-0.0']].dropna()
X_test = X_test[['eid', '20127-0.0']].dropna()

merged_data = pd.read_csv(path_to_merge_brain, usecols=columns)

dmriDict = collections.OrderedDict(
    chain(brain_dmri_fa.items(), brain_dmri_icvf.items(),
          brain_dmri_isovf.items(), brain_dmri_l1.items(),
          brain_dmri_l2.items(), brain_dmri_l3.items(),
          brain_dmri_md.items(), brain_dmri_mo.items(),
          brain_dmri_od.items()))
dmriDict.update({'eid': 'eid'})
dmri = pd.DataFrame(merged_data, columns=dmriDict.keys())
dmri = dmri.dropna()


def load_combine_data(X_split, merged_data, dmri):
    data_frame = []
    eids = []
    for e_id in X_split.eid:
        this_eid_data = merged_data[merged_data['eid'] == e_id]
        this_dmri_data = dmri[dmri['eid'] == e_id]
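        # --- Hypothetical continuation (not in the original excerpt): as in
        # the first variant, the loop is truncated. A minimal sketch, assuming
        # the per-subject phenotype and dMRI rows are joined on 'eid':
        data_frame.append(this_eid_data.merge(this_dmri_data, on='eid'))
        eids.append(e_id)
    return pd.concat(data_frame), eids

# Example call (hypothetical usage): build the combined train/test tables
# train_data, train_eids = load_combine_data(X_train, merged_data, dmri)
# test_data, test_eids = load_combine_data(X_test, merged_data, dmri)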