def load_data_and_label(sel):
    data_name_mean = os.path.join(sel.mat_path, sel.feature_mean)
    data_name_std = os.path.join(sel.mat_path, sel.feature_std)
    label_name = os.path.join(sel.mat_path, sel.label)
    sel.data_mean = read_mat(data_name_mean, dataset_name=None)
    sel.data_std = read_mat(data_name_std, dataset_name=None)
    sel.label = pd.read_excel(label_name)['诊断']  # '诊断' = diagnosis column
    return sel
def load_onemat_and_processing(sel, i, all_mat_, len_all, s):
    # Load one .mat file
    mat = read_mat(all_mat_, sel.dataset_name)

    # Compute std, mean, etc. across windows (extensible); skipped for static FC
    if sel.feature == 'staticFC':
        mat_std, mat_mean = mat, []
    else:
        mat_std, mat_mean = sel.calc_std(mat)

    # Post-process the features (extensible)
    if sel.feature == 'staticFC':
        mat_std_1d, mat_mean_1d = sel.postprocessing_features(mat_std), []
    else:
        mat_std_1d = sel.postprocessing_features(mat_std)
        mat_mean_1d = sel.postprocessing_features(mat_mean)

    # Print loading progress
    if i % 10 == 0 or i == 0:
        print('{}/{}\n'.format(i, len_all))
    if i % 50 == 0 and i != 0:
        e = time.time()
        remaining_running_time = (e - s) * (len_all - i) / i
        print('\nremaining time={:.2f} seconds \n'.format(remaining_running_time))

    return mat_std_1d, mat_mean_1d
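# `calc_std` and `postprocessing_features` are called above but not defined in
# this file. The sketch below shows what they are assumed to do (the shapes and
# the upper-triangle flattening are assumptions for illustration, not the
# project's actual implementation): reduce a dynamic-FC stack to edge-wise
# std/mean, then flatten one matrix to a 1-D feature vector.
def calc_std(sel, mat):
    # mat is assumed to be a (n_windows, n_roi, n_roi) dynamic FC array
    mat_std = np.std(mat, axis=0)    # edge-wise std across windows
    mat_mean = np.mean(mat, axis=0)  # edge-wise mean across windows
    return mat_std, mat_mean


def postprocessing_features(sel, mat2d):
    # Keep the upper triangle (diagonal excluded) as a 1-D feature vector
    mat2d = np.asarray(mat2d)
    iu = np.triu_indices(mat2d.shape[0], k=1)
    return mat2d[iu]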
def load_data_and_label1(sel):
    sel.data = read_mat(sel.mat_path, dataset_name=None)
    sel.label = pd.read_excel(sel.label_path)['诊断']  # '诊断' = diagnosis column
    sel.folder = pd.read_excel(sel.label_path)['folder']
    return sel
def resultFusion(rootPath=r'D:\myCodes\LC_MVPA\Python\MVPA_Python\perm',
                 datasetName=['predict', 'dec', 'y_sorted', 'weight']):
    # Fuse the block-wise results of the permutation test into one table
    fileName = selectFile(rootPath)
    dataset = []
    for dsname in datasetName:
        Ds = []
        for flname in fileName:
            _, ds = read_mat(flname, dsname)
            Ds.append(ds)
        dataset.append(Ds)

    all_metrics = pd.DataFrame(dataset)
    all_metrics = all_metrics.rename(index={
        0: 'predict',
        1: 'dec',
        2: 'y_sorted',
        3: 'weight'
    })

    y_true = pd.DataFrame(all_metrics.loc['y_sorted'][0]).values.ravel()
    y_pred = pd.DataFrame(all_metrics.loc['predict'][0]).values.ravel()

    from sklearn.metrics import confusion_matrix, accuracy_score
    accuracy = accuracy_score(y_true, y_pred)
    cm = confusion_matrix(y_true, y_pred)
    print('accuracy = {:.4f}\nconfusion matrix:\n{}'.format(accuracy, cm))

    return dataset
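# `selectFile` is called above but not defined in this file. A minimal sketch
# under the assumption that it simply collects the permutation-block .mat files
# in rootPath (the original project may use a file-selection dialog instead):
import glob
import os


def selectFile(rootPath):
    # Return all .mat result blocks under rootPath, in a stable order
    return sorted(glob.glob(os.path.join(rootPath, '*.mat')))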
def machine_learning(sel, order=[3, 4]):
    # Labels: keep only the two groups given in `order`
    y = pd.concat([
        sel.label[sel.label.values == order[0]],
        sel.label[sel.label.values == order[1]]
    ])
    y = y.values

    # Features: reload the post-processed feature matrix if it has been saved
    if os.path.exists(os.path.join(sel.save_path, sel.feature + '.mat')):
        sel.mat = pd.DataFrame(
            read_mat(os.path.join(sel.save_path, sel.feature + '.mat'), None))
    x = pd.concat([
        sel.mat.iloc[sel.label.values == order[0], :],
        sel.mat.iloc[sel.label.values == order[1], :]
    ])

    # # Balance test (disabled)
    # y = np.hstack([y, y[-1:-70:-1]])
    # x = pd.concat([x, x.iloc[-1:-70:-1]], axis=0)
    # y = y[60:]
    # x = x.iloc[60:, :]
    # print(sum(y == 0), sum(y == 1))

    # # Permute y (disabled)
    # rand_ind = np.random.permutation(len(y))
    # y = y[rand_ind]

    # Cross-validation
    # 1) Split the data into training and testing sets
    x_train, x_test, y_train, y_test = \
        train_test_split(x, y, random_state=sel.random_state)

    # Elastic net with cross-validation
    print('elasticNetCV')
    model = ENCV.elasticNetCV()
    model.train(x_train, y_train)
    results = model.test(x_test).__dict__

    # ============================================================================
    # # Alternative: SVC with recursive feature elimination (disabled)
    # import lc_svc_rfe_cv_V2 as lsvc
    # model = lsvc.svc_rfe_cv(k=5, pca_n_component=0.85)
    # results = model.main_svc_rfe_cv(x.values, y)
    # ============================================================================

    return results
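# Note: the train_test_split call above does not stratify by class, so an
# unbalanced pair of groups can end up unevenly split between the training and
# testing sets. A self-contained illustration of a stratified split with
# sklearn (toy data; whether stratification is wanted in this pipeline is an
# assumption):
import numpy as np
from sklearn.model_selection import train_test_split

X_toy = np.random.rand(20, 5)
y_toy = np.array([0] * 15 + [1] * 5)  # unbalanced toy labels
Xtr, Xte, ytr, yte = train_test_split(
    X_toy, y_toy, random_state=0, stratify=y_toy)  # class ratio kept in both splits
print(np.bincount(ytr), np.bincount(yte))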
def load_allmat(sel):
    # Load all .mat files (multi-threaded)
    s = time.time()
    print('loading all mat...\n')

    # If the post-processed FC .mat file already exists, load it directly
    if os.path.exists(os.path.join(sel.save_path, sel.feature + '.mat')):
        sel.mat = pd.DataFrame(
            read_mat(os.path.join(sel.save_path, sel.feature + '.mat'), None))
        print('Already have {}\nloaded all mat!\nrunning time={:.2f}'.format(
            sel.feature + '.mat', time.time() - s))
    else:
        sel.all_mat = os.listdir(sel.file_path)
        all_mat_path = [
            os.path.join(sel.file_path, all_mat_) for all_mat_ in sel.all_mat
        ]

        cores = multiprocessing.cpu_count()
        if sel.n_processess > cores:
            sel.n_processess = cores - 1

        len_all = len(all_mat_path)
        sel.mat = pd.DataFrame([])

        # Which summary to keep as the feature: std or mean
        if sel.feature == 'mean':
            ith = 1
        elif sel.feature == 'std':
            ith = 0
        elif sel.feature == 'staticFC':
            ith = 0
        else:
            print('### Other dFC metrics not implemented yet; defaulting to std ###\n')
            ith = 0

        # Load and post-process every .mat file
        with ThreadPoolExecutor(sel.n_processess) as executor:
            for i, all_mat_ in enumerate(all_mat_path):
                task = executor.submit(sel.load_onemat_and_processing,
                                       i, all_mat_, len_all, s)
                sel.mat = pd.concat(
                    [sel.mat, pd.DataFrame(task.result()[ith]).T], axis=0)

        # Save the post-processed feature matrix
        if sel.if_save_post_mat:
            write_mat(fileName=os.path.join(sel.save_path, sel.feature + '.mat'),
                      dataset_name=sel.feature,
                      dataset=np.mat(sel.mat.values))
            print('saved all {} mat!\n'.format(sel.feature))
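# The loaders above read their configuration from the `sel` object (a class
# instance in the original project whose methods include
# load_onemat_and_processing). A minimal sketch of the attributes they expect;
# the folder names and values below are assumptions for illustration only:
from types import SimpleNamespace

sel_example = SimpleNamespace(
    file_path=r'D:\data\dFC_mats',   # hypothetical folder with one .mat per subject
    save_path=r'D:\data\results',    # hypothetical output folder for <feature>.mat
    dataset_name=None,               # variable name inside each .mat file
    feature='std',                   # 'std', 'mean' or 'staticFC'
    n_processess=4,                  # worker threads, capped at cpu_count() - 1
    if_save_post_mat=True,           # write the post-processed feature matrix
)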
Created on Tue Nov 13 20:55:51 2018

@author: lenovo
"""
import sys
sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\MVPA\utils')
from lc_read_write_Mat import read_mat
import numpy as np
import pandas as pd

hcPath = r'I:\dynamicALFF\Results\DALFF\50_0.9\Statistical_Results\Signal\ROISignals_ROISignal_FWHM4_HC.mat'
szPath = r'I:\dynamicALFF\Results\DALFF\50_0.9\Statistical_Results\Signal\ROISignals_ROISignal_FWHM4_SZ.mat'
bdPath = r'I:\dynamicALFF\Results\DALFF\50_0.9\Statistical_Results\Signal\ROISignals_ROISignal_FWHM4_BD.mat'
mddPath = r'I:\dynamicALFF\Results\DALFF\50_0.9\Statistical_Results\Signal\ROISignals_ROISignal_FWHM4_MDD.mat'

dataset_struct, datasetHC = read_mat(hcPath, 'ROISignals')
dataset_struct, datasetSZ = read_mat(szPath, 'ROISignals')
dataset_struct, datasetBD = read_mat(bdPath, 'ROISignals')
dataset_struct, datasetMDD = read_mat(mddPath, 'ROISignals')

meanHC = pd.DataFrame(np.mean(datasetHC, axis=0))
meanSZ = pd.DataFrame(np.mean(datasetSZ, axis=0))
meanBD = pd.DataFrame(np.mean(datasetBD, axis=0))
meanMDD = pd.DataFrame(np.mean(datasetMDD, axis=0))

allData = pd.concat([meanHC, meanSZ, meanBD, meanMDD], axis=1)
allData.index = [
    '左侧额中回/额上回 ', '右侧额上回(靠内)', '右侧前扣带回 ', '右侧尾状核', '左侧尾状核',
    '右侧putamen', '左侧putamen', '右侧前岛叶', '左侧前岛叶', '右侧杏仁核 ', '左侧杏仁核 ',
    '右侧海马', '左侧海马', '右侧海马旁回', '左侧海马旁回', '右侧舌回', '左侧舌回',
    '右侧cuneus', '左侧cuneus', '右侧angular gyrus', '右侧中央后回'
plotting.plot_matrix(matrix, vmin=-1., vmax=1., colorbar=True,
                     title='Power correlation matrix')

# Tweak edge_threshold to keep only the strongest connections.
plotting.plot_connectome(matrix, coords, title='Power correlation graph',
                         edge_threshold='99.8%', node_size=20, colorbar=True)

file = r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\results\results_dfc\group_centroids_2.mat'
matrix = read_mat(file)

yeo_file = r'D:\My_Codes\Github_Related\Github_Code\Template_CBIG\stable_projects\brain_parcellation\Yeo2011_fcMRI_clustering\1000subjects_reference\Yeo_JNeurophysiol11_SplitLabels\MNI152\Yeo2011_17Networks_N1000.split_components.FSL_MNI152_2mm.nii.gz'

# smooth_img lives in nilearn.image (nilearn.input_data holds the maskers)
from nilearn import image
smoothed_img = image.smooth_img(yeo_file, fwhm=None)  # fwhm=None: no extra smoothing

yeo = datasets.fetch_atlas_yeo_2011()
print('Yeo atlas nifti image (3D) with 17 parcels and liberal mask is located '
      'at: %s' % yeo['thick_17'])

coords = plotting.find_parcellation_cut_coords(labels_img=yeo['thick_17'])
plotting.plot_connectome(matrix, coords, title='Power correlation graph',
                         edge_threshold='99.8%', node_size=20, colorbar=True)
""" Created on Tue Aug 28 15:07:49 2018 @author: lenovo """ from lc_svc_oneVsRest import oneVsRest import numpy as np import pandas as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils') sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\classfication') # X fileName = r'J:\分类测试_20180828\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\分类测试_20180828\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in y] # predict and test