def train_dataset_feature_intra(
        train_dir='train1', subject_list=['subject_1'], feature_type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False,
        channel_pos_list=['O'], action_num=7, chan_num=4):
    print 'train_dataset_feature_intra......'
    my_clfs = ["LDA"]
    start_time = time.time()
    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num
    # Suffix appended to the log folder name when zero-mean standardization is applied
    norm = ''
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)
        # Optional zero-mean standardization
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'
        classifier_lda.training_lda_TD4_intra(
            my_clfs, trains, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list, num=1, chan_len=chan_len,
            action_num=action_num, feature_type=feature_type, chan_num=chan_num)
    print "Total times: ", time.time() - start_time, 's'
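
# NOTE: data_normalize() is provided elsewhere in this project. The helper below is a
# hypothetical stand-in, not the project's implementation; it only sketches the behaviour
# that the z_score flag and the "zero-mean standardization" comments suggest:
# per-column zero-mean, unit-variance scaling of the feature matrix.
def _data_normalize_sketch(trains):
    mean = trains.mean(axis=0)
    std = trains.std(axis=0)
    std[std == 0] = 1.0  # guard against constant feature columns
    return (trains - mean) / std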
def train_dataset_feature(train_dir='train1', subject_list=['subject_1'],
                          type='TD4', dataset='data1', fold_pre='250_100',
                          z_score=False):
    # my_clfs = ["LDA", "QDA", "GaussianNB", "SVC_linear", "SVC_rbf", "Logistic"]
    my_clfs = ["LDA"]
    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        classifier.training_lda_TD4(
            my_clfs, trains, classes,
            log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub, num=1)
        # classifier.training_lda_TD4_cross(my_clfs, trains1, classes1, trains2, classes2,
        #                                   log_fold='TD4_data4_' + subject + '_1to2', num=1)
        # classifier.training_lda_TD4_cross(my_clfs, trains2, classes2, trains1, classes1,
        #                                   log_fold='TD4_data4_' + subject + '_2to1', num=1)
    print "Total times: ", time.time() - start_time, 's'
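
# training_lda_TD4() belongs to the project's classifier module and is not shown here.
# The function below is only a rough, hypothetical illustration of the kind of evaluation
# such a routine typically wraps (an assumption, not the project's implementation):
# fit an LDA classifier on the loaded TD4 feature matrix and report cross-validated accuracy.
def _lda_cross_val_sketch(trains, classes, n_folds=5):
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.model_selection import cross_val_score
    clf = LinearDiscriminantAnalysis()
    scores = cross_val_score(clf, trains, classes, cv=n_folds)
    return scores.mean(), scores.std()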
def train_dataset_feature_inter(
        train_dir='train1', subject_list=['subject_1'], feature_type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False,
        channel_pos_list=['O'], action_num=11, chan_num=4):
    my_clfs = ["LDA"]
    start_time = time.time()
    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num
    norm = ''
    # Drop the first (reference) position; the remaining entries are the shifted positions
    channel_pos_list = channel_pos_list[1:]
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'
        trains_inter = trains[:, 0:chan_len]
        tests_inter = trains[:, chan_len:]
        training_lda_TD4_inter(
            my_clfs, trains_inter, tests_inter, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list, num=1, chan_len=chan_len,
            action_num=action_num)
    print "Total times: ", time.time() - start_time, 's'
def train_dataset_feature_inter(
        train_dir='train1', subject_list=['subject_1'], type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False,
        channel_pos_list=['O']):
    my_clfs = ["LDA"]
    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        # Keep only the first 6 of the 15 actions (temporary)
        len_temp = trains.shape[0] / 15 * 6
        trains = trains[0:len_temp, :]
        classes = classes[0:len_temp]
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        if dataset == 'data1':
            chan_span = 16  # channel span
            chan_num = 2
        elif dataset == 'dataset4':
            chan_span = 0   # channel span
            chan_num = 4
        if type == 'TD4':
            feat_num = 4
        chan_len = feat_num * chan_span
        tests_inter = np.array([])
        for idx, channel_pos in enumerate(channel_pos_list):
            start = idx * feat_num
            if idx == 0:
                # First entry: training block for the reference position
                trains_inter = np.concatenate(
                    (trains[:, start:start + feat_num],
                     trains[:, start + chan_len:start + chan_len + feat_num]),
                    axis=1)
            else:
                # Remaining entries: test blocks for the shifted positions
                test_temp = np.concatenate(
                    (trains[:, start:start + feat_num],
                     trains[:, start + chan_len:start + chan_len + feat_num]),
                    axis=1)
                if tests_inter.shape[0] != 0:
                    tests_inter = np.concatenate((tests_inter, test_temp), axis=1)
                else:
                    tests_inter = test_temp
        channel_pos_list = channel_pos_list[1:]
        classifier.training_lda_TD4_inter(
            my_clfs, trains_inter, tests_inter, classes,
            log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub + '_updated',
            pos_list=channel_pos_list, num=1, chan_len=chan_num * feat_num)
    print "Total times: ", time.time() - start_time, 's'
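
# Illustration of the column pairing performed in the loop above, under the layout it
# assumes: for electrode position idx, the feat_num columns starting at idx * feat_num are
# taken from the first channel group and paired with the columns offset by
# chan_len = feat_num * chan_span from the second group. The sizes below are illustrative,
# not taken from the real datasets.
def _column_pairing_demo():
    import numpy as np
    feat_num, chan_span = 4, 16
    chan_len = feat_num * chan_span
    demo = np.arange(2 * (chan_len + 2 * feat_num)).reshape(2, -1)
    idx = 1                                  # second position in channel_pos_list
    start = idx * feat_num
    pair = np.concatenate(
        (demo[:, start:start + feat_num],
         demo[:, start + chan_len:start + chan_len + feat_num]), axis=1)
    return pair.shape                        # (2, 8): feat_num columns from each group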
def train_dataset_feature_inter(
        train_dir='train4_250_100', subject_list=['subject_1'],
        feature_type='TD4', dataset='data4', fold_pre='250_100', z_score=False,
        channel_pos_list=['S0'], action_num=7, chan_num=4):
    my_clfs = ["LDA"]
    start_time = time.time()
    # All positions after the first (S0) are the shifted electrode positions
    channel_pos_list_shift = channel_pos_list[1:]
    if feature_type == 'TD4':
        feat_num = 4  # feature dimension (TD4: 4)
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num  # 16
    norm = ''
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'
        trains_S0 = trains[:, 0:chan_len]      # features recorded at the S0 position
        trains_shift = trains[:, chan_len:]    # features recorded at the shifted positions
        num = 1
        # Generate the CCA transformation matrices
        # classifier_lda_cca.generate_transform_equations(
        #     trains_S0, trains_shift, pos_list=channel_pos_list_shift,
        #     chan_len=chan_len, subject=sub)
        # Run the center-training strategy
        classifier_lda_cca.training_lda_TD4_inter(
            my_clfs, trains_S0, trains_shift, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list_shift, chan_len=chan_len, chan_num=chan_num,
            feature_type=feature_type, action_num=action_num, num=num, subject=sub)
    print "Total times: ", time.time() - start_time, 's'
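
# The [0:chan_len] / [chan_len:] split above assumes a fixed column layout: the first
# chan_len = feat_num * chan_num columns hold the features measured at the reference
# position S0, followed by one chan_len-wide block per shifted position in
# channel_pos_list_shift. The demo below only illustrates that slicing; the sizes are
# made up, not taken from the real recordings.
def _feature_layout_demo():
    import numpy as np
    feat_num, chan_num, n_shift_pos, n_samples = 4, 4, 2, 3
    chan_len = feat_num * chan_num                  # 16 columns per electrode position
    demo = np.zeros((n_samples, chan_len * (1 + n_shift_pos)))
    demo_S0 = demo[:, 0:chan_len]                   # reference-position block
    demo_shift = demo[:, chan_len:]                 # all shifted-position blocks
    return demo_S0.shape, demo_shift.shape          # (3, 16), (3, 32)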
def train_dataset_feature_inter(
        train_dir='train4_250_100', subject_list=['subject_1'],
        feature_type='TD4', dataset='data4', fold_pre='250_100', z_score=False,
        channel_pos_list=['S0'], action_num=7, group_num=4):
    my_clfs = ["LDA"]
    start_time = time.time()
    # channel_pos_list_shift = channel_pos_list[1:]
    channel_pos_list_shift = channel_pos_list
    action_num = 7    # overrides the argument
    group_num = 4     # overrides the argument
    if feature_type == 'TD4':
        feat_num = 4  # feature dimension (TD4: 4)
    elif feature_type == 'TD5':
        feat_num = 5
    chan_num = 4      # number of channels (4)
    chan_len = feat_num * chan_num  # 16
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type)
        tests_inter = np.array([])
        trains_inter = trains[:, 0:chan_len]
        # Train on Gaussian-simulated samples, test on the measured data
        trains_simu, classes_simu = guassion_simu(
            trains_inter, classes, sub, action_num, chan_num, feat_num)
        # trains_simu, classes_simu = trains_inter, classes
        tests_inter = trains
        if z_score:
            trains_simu = data_normalize(trains_simu)
            tests_inter = data_normalize(tests_inter)
            sub = 'norm_' + sub
        num = 3
        classifier_lda.training_lda_TD4_inter(
            my_clfs, trains_simu, classes_simu, tests_inter, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + '_simu1',
            pos_list=channel_pos_list_shift, chan_len=chan_len, group_num=group_num,
            feature_type=feature_type, action_num=action_num, num=num)
    print "Total times: ", time.time() - start_time, 's'
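
# guassion_simu() is defined elsewhere in the project and its exact behaviour is not shown
# here. The helper below is only a hypothetical sketch of what such a routine might do,
# assuming it augments the training set by sampling from a per-class multivariate Gaussian
# fitted to the measured features; the name, signature and behaviour are assumptions, not
# the project's actual implementation.
def _guassion_simu_sketch(trains, classes, samples_per_class=100):
    import numpy as np
    simu_feats, simu_labels = [], []
    for label in np.unique(classes):
        feats = trains[classes == label]
        mean = feats.mean(axis=0)
        cov = np.cov(feats, rowvar=False)
        simu_feats.append(np.random.multivariate_normal(mean, cov, samples_per_class))
        simu_labels.append(np.full(samples_per_class, label))
    return np.vstack(simu_feats), np.concatenate(simu_labels)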
def train_dataset_feature_inter( train_dir="train1", subject_list=["subject_1"], feature_type="TD4", dataset="data1", fold_pre="250_100", z_score=False, channel_pos_list=["O"], action_num=11, chan_num=4, ): my_clfs = ["LDA"] start_time = time.time() if feature_type == "TD4": feat_num = 4 elif feature_type == "TD5": feat_num = 5 chan_len = feat_num * chan_num norm = "" channel_pos_list = channel_pos_list[1:] for sub in subject_list: trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type, action_num) if z_score: trains = data_normalize(trains) norm = "_norm" trains_inter = trains[:, 0:chan_len] tests_inter = trains[:, chan_len:] training_lda_TD4_inter( my_clfs, trains_inter, tests_inter, classes, log_fold=fold_pre + "/" + feature_type + "_" + dataset + "_" + sub + norm, pos_list=channel_pos_list, num=1, chan_len=chan_len, action_num=action_num, ) print "Total times: ", time.time() - start_time, "s"
def train_dataset_feature_intra(
        train_dir='train1', subject_list=['subject_1'], feature_type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False,
        channel_pos_list=['O'], action_num=7, chan_num=4):
    print 'train_dataset_feature_intra......'
    my_clfs = ["LDA"]
    start_time = time.time()
    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num
    # Suffix appended to the log folder name when zero-mean standardization is applied
    norm = ''
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)
        # Optional zero-mean standardization
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'
        classifier_lda_cca.training_lda_TD4_intra(
            my_clfs, trains, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list, num=1, chan_len=chan_len,
            action_num=action_num, feature_type=feature_type, chan_num=chan_num)
    print "Total times: ", time.time() - start_time, 's'
def train_dataset_feature_intra(
        train_dir='train1', subject_list=['subject_1'], type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False,
        channel_pos_list=['O']):
    # my_clfs = ["LDA", "QDA", "GaussianNB", "SVC_linear", "SVC_rbf", "Logistic"]
    my_clfs = ["LDA"]
    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        if dataset == 'data1':
            chan_span = 17  # channel span
        elif dataset == 'dataset4':
            chan_span = 0   # channel span
        if type == 'TD4':
            feat_num = 4
        chan_len = feat_num * chan_span
        for idx, channel_pos in enumerate(channel_pos_list):
            start = idx * feat_num
            trains_intra = np.concatenate(
                (trains[:, start:start + feat_num],
                 trains[:, start + chan_len:start + chan_len + feat_num]),
                axis=1)
            # Note: the call below passes the full feature matrix `trains`,
            # not the per-position slice `trains_intra` computed above.
            classifier.training_lda_TD4_intra(
                my_clfs, trains, classes,
                log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub + '_updated',
                log_file_pos=channel_pos, num=1)
    print "Total times: ", time.time() - start_time, 's'