# Prepare EEG trial data for training a 4-class classifier.
# FIX: os, numpy, keras, ModelCheckpoint and train_test_split were used
# without being imported in this script.
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'  # silence TensorFlow info/warning logs

import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
## PRI
from ML_funcs import unpickle, create_model

## setting environment and data
MODEL_DIR = "F:/data_eeg/result_model_LA"
dic_file = 'F:/data_eeg/all_pkl/result_LA_pkl.pkl'
BATCH_SIZE = 128
EPOCHS = 20
num_Classes = 4

# Checkpoint only when val_loss improves; file name encodes epoch and losses.
checkpoint = ModelCheckpoint(
    filepath=os.path.join(MODEL_DIR, 'M_{epoch:03d}_l_{loss:.3f}_vl_{val_loss:.3f}.hdf5'),
    save_best_only=True)

# Pickled dict with 'data' (trials) and 'labels' lists.
all_data = unpickle(dic_file)
# NOTE(review): despite the *_one_hot names these are the raw labels from the
# pickle; the to_categorical conversion below is commented out — confirm the
# pickle already stores one-hot labels.
train_data, test_data, train_labels_one_hot, test_labels_one_hot = train_test_split(
    all_data['data'], all_data['labels'])
#print('before',train_labels_one_hot.shape, test_labels_one_hot.shape)
train_data = np.array(train_data)
test_data = np.array(test_data)
print(train_data.shape, test_data.shape)
# Add a trailing channel axis: (trials, 600) -> (trials, 600, 1).
train_data = np.reshape(train_data, (train_data.shape[0], 600, 1))
test_data = np.reshape(test_data, (test_data.shape[0], 600, 1))  # do it if need to change data struct
print(train_data.shape, test_data.shape)
# train_labels_one_hot = keras.utils.to_categorical(train_labels_one_hot, num_Classes)
# test_labels_one_hot = keras.utils.to_categorical(test_labels_one_hot, num_Classes)
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
print(train_data.shape, len(train_labels_one_hot))
# Bootstrap classification accuracy over time for nogo hit vs. miss trials.
# FIX: numpy was used (np.zeros, np.alen) but never imported here, and
# np.alen was removed in NumPy 1.23 — replaced with the builtin len().
import numpy as np
from ML_funcs import unpickle, minmax
from sklearn.decomposition import PCA
import sklearn.preprocessing
import matplotlib.pyplot as plt
import random
import keras
import pickle as pkl

dic_file_fa = 'F:/Insula-Gcamp6/record/record_split_by_behav/50%_nogo_hit_trials.pkl'
dic_file_cr = 'F:/Insula-Gcamp6/record/record_split_by_behav/50%_nogo_miss_trials.pkl'
result_path = 'C:/Users/manggny/Desktop/record_all/'
num_Classes = 2
#checkpoint = ModelCheckpoint(filepath=os.path.join(MODEL_DIR,'M_{epoch:03d}_l_{loss:.3f}_vl_{val_loss:.3f}.hdf5'),save_best_only=True)

# Trial matrices for the two behavioural outcomes (rows = trials).
data_fa = unpickle(dic_file_fa)
data_cr = unpickle(dic_file_cr)

ticks = 28
start = 0
time_acc = np.zeros(ticks)  # 1 for 0.2s, all 3s (-2s~1s)
loop_time = 1000            # number of bootstrap resamples
time_step = 25
# Size of the smaller class — presumably used to balance resampling; confirm
# against the (truncated) loop body below.
lower = min(len(data_fa), len(data_cr))

# for i in range(len(data_fa)):
#     data_fa[i,:] = sklearn.preprocessing.scale(data_fa[i,:],axis=0) #,feature_range=(-1,1)
# for i in range(len(data_cr)):
#     data_cr[i, :] = sklearn.preprocessing.scale(data_cr[i, :], axis=0) #,feature_range=(-1,1)

for loop in range(loop_time):
    # Candidate trial indices for this bootstrap iteration.
    idx_data_fa = list(range(len(data_fa)))
# Reduce each electrode's time series to 7 PCA components.
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA  # FIX: PCA was instantiated below but never imported
from matplotlib import style
style.use('ggplot')
from ML_funcs import unpickle

dic_name = 'C:/Users/Administrator/Desktop/EEGDATA/result_sub_features_4labels.pkl'
# Pickled dict with 'data' (trials x time x electrodes) and 'labels'.
data = unpickle(dic_name)
print(np.array(data['data']).shape)
result_dic = {'data': [], 'labels': data['labels']}
data = np.array(data['data'])
tr, ti, elec = np.shape(data)
# One 7-component vector per trial, per electrode.
result = np.zeros((tr, 7, elec))
for i in range(elec):
    # Standardize this electrode's (trials x time) slice before PCA.
    data_dummy = data[:, :, i]
    scal = StandardScaler()
    scal.fit(data_dummy)
    s_data = scal.transform(data_dummy)
    p = PCA(n_components=7)
# Select trials with labels 1 and 0, z-score each column in place, and
# collect them into parallel datas/labels lists.
from ML_funcs import unpickle, mean_range, create_model
import numpy as np
import pickle as pkl
import sklearn.preprocessing

all_data_path = 'C:/Users/Administrator/Desktop/EEGDATA/result_boot_r_0702.pkl'
# Pickled dict with 'data' (list of 2-D arrays) and 'labels' (list of ints).
data_all = unpickle(all_data_path)
# Per-class trial counts for the four labels.
print(data_all['labels'].count(0), data_all['labels'].count(1), data_all['labels'].count(2), data_all['labels'].count(3))
trial_num = len(data_all['data'])
datas = []
labels = []
print(data_all['data'][1].shape)
for i in range(trial_num):
    _, col = data_all['data'][i].shape
    print(data_all['data'][i].shape)
    if (data_all['labels'][i] == 1):  # or (data_all['labels'][i] == 2):
        print(data_all['labels'][i])
        # Standardize each column of this trial; NOTE: mutates data_all in place.
        for k in range(col):
            data_all['data'][i][:, k] = sklearn.preprocessing.scale(
                data_all['data'][i][:, k])
        datas.append(data_all['data'][i])
        labels.append(1)
    elif (data_all['labels'][i] == 0):
        print(data_all['labels'][i])
        # Same per-column standardization for label-0 trials.
        for k in range(col):
            data_all['data'][i][:, k] = sklearn.preprocessing.scale(
                data_all['data'][i][:, k])
        # NOTE(review): script is truncated here — the matching
        # labels.append(0) presumably follows; confirm in the full file.
        datas.append(data_all['data'][i])
# Rank features by their mean bootstrapped SVM accuracy (descending).
from typing import List
from ML_funcs import unpickle
import matplotlib.pyplot as plt
import numpy as np
# FIX: was imported from the private numpy.core._multiarray_umath module,
# which breaks on modern NumPy; the public name is identical.
from numpy import ndarray

result_path = 'C:/Users/Administrator/Desktop/EEGDATA/result_svm/'
path = 'C:/Users/Administrator/Desktop/EEGDATA/all_txts'
dic_path = 'C:/Users/Administrator/Desktop/EEGDATA/all_pkl/svm_result_acc_r_boot_0702.pkl'

# 2-D array: rows = bootstrap runs, columns = features (was hard-coded to 35).
data = unpickle(dic_path)
print(data)

num_feats = data.shape[1]
mean_acc = []
for i in range(num_feats):
    # Compute each column mean once (was computed twice: append + print).
    col_mean = np.mean(data[:, i])
    mean_acc.append(col_mean)
    print(col_mean)

print(mean_acc)
# Indices of features with positive mean accuracy, best first.
# sorted() is stable, so ties keep ascending index order — same result as the
# original O(n^2) repeated-max loop, which also skipped non-positive means.
sort_idx = [k for k in sorted(range(num_feats), key=mean_acc.__getitem__, reverse=True)
            if mean_acc[k] > 0]
print(sort_idx)
#print(mean_acc)