def save_clusters(y, offset, filename):
    """Write cluster assignments back onto persons currently labelled 2.

    Shifts every value in ``y`` by ``offset``, then walks all centers from
    ``datasets.load_centers_all()`` and, in order, overwrites the label of
    each person whose label is 2 with the next shifted value. After all
    labels are assigned, every center persists its labels via
    ``save_labels(filename)``.

    Args:
        y: sequence/array of cluster ids, one per person labelled 2,
           in center/person iteration order.
        offset: value added to every entry of ``y`` before assignment.
        filename: passed through to each center's ``save_labels``.
    """
    shifted = y + offset
    cursor = 0
    center_list = datasets.load_centers_all()
    # First pass: assign shifted cluster ids to persons labelled 2.
    for center in center_list:
        for person in center.persons:
            if person.label == 2:
                person.label = shifted[cursor]
                cursor += 1
    # Second pass (kept separate on purpose): save only after every
    # label has been assigned.
    for center in center_list:
        center.save_labels(filename)
#%%
import center
import datasets
import numpy as np
import mask
import nibabel as nib
from sklearn.preprocessing import OneHotEncoder

# Module-level side effect kept from the original cell: load all centers once.
centers = datasets.load_centers_all()


def get_index(lst, item):
    """Return the indices of elements in ``lst`` strictly greater than ``item``."""
    return [pos for pos, value in enumerate(lst) if value > item]


def onehot(n_labels, labels):
    """One-hot encode integer ``labels`` as rows of an ``n_labels`` identity matrix."""
    return np.eye(n_labels)[labels]


def minmax(x, axis=0):
    """Min-max scale ``x`` to [0, 1] along ``axis`` (no zero-range guard)."""
    lo = np.min(x, axis=axis)
    hi = np.max(x, axis=axis)
    return (x - lo) / (hi - lo)


def z_norm(x, axis=0):
    """Z-score ``x`` along ``axis`` (no zero-std guard)."""
    mu = np.mean(x, axis=axis)
    sigma = np.std(x, axis=axis)
    return (x - mu) / sigma
#%% import datasets center_list = datasets.load_centers_all() # %% #prepare HYDRA csv import csv import numpy as np feature_path = './matlab/HYDRA/data/features.csv' covariate_path = './matlab/HYDRA/data/covariate.csv' i = 0 with open(feature_path, 'w', newline='') as feature_file: header = ['id'] + ['feature_{}'.format(i) for i in range(456)] + ['group'] featureswriter = csv.DictWriter(feature_file, fieldnames=header) featureswriter.writeheader() with open(covariate_path, 'w', newline='') as covariate_file: covariatewriter = csv.writer(covariate_file) for center in center_list: features_rv, labels = center.get_csv_values(flatten=True) features_ct, _ = center.get_csv_values( prefix='cortical_thickness/{}.csv', flatten=True) features = np.concatenate([features_rv, features_ct], axis=1) personal_infos, _ = center.get_presonal_info_values() tcgws, _ = center.get_tivs_cgws() for feature, label, personal_info, tcgw in zip( features, labels, personal_infos, tcgws): if label == 2: label = 1 elif label == 0: label = -1 else: continue