# Build the reference (training) sequences restricted to the selected features
for data in data_ref1:
    df = pd.DataFrame(data)
    df.columns = list_features
    data_ref.append(df[list_features_final].values)

# Train the HMM on the reference sequences
model = ModelHMM()
model.train(data_ref, labels_ref, list_features_final, dim_features)

# Build the test sequences with the same feature subset
data_ref = []
for data in data_test:
    df = pd.DataFrame(data)
    df.columns = list_features
    data_ref.append(df[list_features_final].values)

pred_labels, proba = model.test_model(data_ref)

# Average the per-sequence F1 scores for this feature subset
# (an illustrative sketch of this score follows after this snippet)
F1_temp = []
for i in range(len(labels_test)):
    F1_temp.append(
        tools.compute_F1_score(labels_test[i], pred_labels[i],
                               list_states[num_track]))
F1_score.append(np.mean(F1_temp))
dim_score.append(dim)
feaures_save.append(str(list_features_final))

# Collect the results of every tested feature subset
score_totaux = pd.DataFrame({
    'nbr_components': dim_score,
    'score': F1_score,
    'features': feaures_save
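# --- Hedged sketch, for reference only: tools.compute_F1_score is part of this
# project and its definition is not shown here. Assuming it returns a
# macro-averaged F1 over the given state list for one labelled sequence, a
# minimal stand-alone equivalent could look like this:
import numpy as np

def f1_score_macro_sketch(ground_truth, prediction, states):
    """Macro-averaged F1 over `states` for one sequence (illustrative only)."""
    ground_truth = np.asarray(ground_truth)
    prediction = np.asarray(prediction)
    f1_per_state = []
    for state in states:
        tp = np.sum((prediction == state) & (ground_truth == state))
        fp = np.sum((prediction == state) & (ground_truth != state))
        fn = np.sum((prediction != state) & (ground_truth == state))
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = (2 * precision * recall / (precision + recall)
              if (precision + recall) > 0 else 0.0)
        f1_per_state.append(f1)
    return np.mean(f1_per_state)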
                best_features_wrapper, dim_features)
if (save):
    model.save_model(path_model, name_model, "load_handling")

#### Test
# Build the test sequences for both feature subsets (Fisher and wrapper selection)
data_fisher = []
data_wrapper = []
for id_subject in id_test:
    df = pd.DataFrame(data_win2[id_subject])
    df.columns = list_features
    data_fisher.append(df[best_features_fisher].values)
    data_wrapper.append(df[best_features_wrapper].values)

predict_labels_fisher, proba = model_fisher.test_model(data_fisher)
predict_labels_wrapper, proba = model_wrapper.test_model(data_wrapper)

time_test = []
for id_subject in id_test:
    time_test.append(timestamps[id_subject])

# Accumulate the confusion matrices of both models over the test sequences
for i in range(len(labels_test)):
    conf_mat = tools.compute_confusion_matrix(
        predict_labels_fisher[i], labels_test[i], list_states)
    confusion_matrix_fisher += conf_mat

    conf_mat = tools.compute_confusion_matrix(
        predict_labels_wrapper[i], labels_test[i], list_states)
    confusion_matrix_wrapper += conf_mat
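# --- Hedged sketch, for reference only: tools.compute_confusion_matrix is part
# of this project and is not shown here. Assuming it returns a
# len(states) x len(states) count matrix that can be summed across sequences
# (the row/column convention used by the project may differ), an illustrative
# equivalent is:
import numpy as np

def confusion_matrix_sketch(prediction, ground_truth, states):
    """Count matrix with rows = ground-truth state, columns = predicted state."""
    index = {state: k for k, state in enumerate(states)}
    mat = np.zeros((len(states), len(states)))
    for pred, true in zip(prediction, ground_truth):
        mat[index[true], index[pred]] += 1
    return mat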
# Restrict the training and test sets to the selected features
train_set = tools.reduce_data_to_features(data_train, list_features,
                                          list_features_final)
test_set = tools.reduce_data_to_features(data_test, list_features,
                                         list_features_final)
dim_features = np.ones(len(list_features_final))

print('DEBUG list of final features ', list_features_final)
plt.plot(train_set[0][:, 3])
plt.show()

# Training the model
model = ModelHMM()
model.train(train_set, labels_train, list_features_final, dim_features)

# Testing the model
pred_labels, proba = model.test_model(test_set)
# debug sere: write pred_labels and proba to a file (see the sketch after this snippet)

# Average the per-sequence F1 scores
F1_temp = []
for i in range(len(labels_test)):
    F1_temp.append(
        tools.compute_F1_score(labels_test[i], pred_labels[i],
                               list_states[num_track]))
F1_score.append(np.mean(F1_temp))
print(F1_score)

if (flag_save):
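# --- Hedged sketch addressing the debug note above (writing pred_labels and
# proba to a file). This helper is not part of the project; the file names and
# the assumption that pred_labels/proba hold one entry per test sequence are
# for illustration only.
import numpy as np
import pandas as pd

def dump_predictions_sketch(pred_labels, proba, path_prefix='debug_pred'):
    # One CSV of predicted labels per test sequence
    for seq_id, labels_seq in enumerate(pred_labels):
        pd.DataFrame({'prediction': labels_seq}).to_csv(
            '{}_{}.csv'.format(path_prefix, seq_id), index=False)
    # Probabilities are kept in a single pickled object array, whatever their shape
    np.save('{}_proba.npy'.format(path_prefix), np.asarray(proba, dtype=object))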
                            (len(list_states), len(list_states)))
for nbr_test in range(nbr_cross_val):
    # Random train/test split for this cross-validation fold
    data_ref, labels_ref, data_test, labels_test, id_train, id_test = \
        tools.split_data_base2(data_win, real_labels, ratio)

    model = ModelHMM()
    model.train(data_ref, labels_ref, sub_list_features,
                np.ones(len(sub_list_features)))

    #### Test
    time_test = []
    for id_subject in id_test:
        time_test.append(timestamps[id_subject])

    predict_labels, proba = model.test_model(data_test)

    # Accumulate the confusion matrix and the averaged MCC over the test sequences
    for i in range(len(predict_labels)):
        conf_mat = tools.compute_confusion_matrix(
            predict_labels[i], labels_test[i], list_states)
        confusion_matrix += conf_mat
        MCC += tools.compute_MCC_score(
            predict_labels[i], labels_test[i], list_states) / len(predict_labels)

# Derive the global scores from the summed confusion matrix
# (an illustrative sketch follows after this snippet)
prec_total, recall_total, F1_score = tools.compute_score(confusion_matrix)
acc = tools.get_accuracy(confusion_matrix)
F1_S = F1_score
# F1_S = MCC/nbr_cross_val

if (len(score) == 0):
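# --- Hedged sketch, for reference only: tools.compute_score and
# tools.get_accuracy belong to this project and are not shown here. Assuming
# precision/recall/F1 are computed per state from the summed confusion matrix
# (rows = ground truth, columns = prediction) and accuracy is the trace over
# the total count, an illustrative equivalent is:
import numpy as np

def scores_from_confusion_matrix_sketch(conf_mat):
    conf_mat = np.asarray(conf_mat, dtype=float)
    tp = np.diag(conf_mat)
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = np.nan_to_num(tp / conf_mat.sum(axis=0))
        recall = np.nan_to_num(tp / conf_mat.sum(axis=1))
        f1 = np.nan_to_num(2 * precision * recall / (precision + recall))
    accuracy = tp.sum() / conf_mat.sum()
    return precision, recall, f1, accuracy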
data_test_action = []
accu = 0
total_ = 0

# Gather each test subject's data for every level of the label taxonomy
for id_subject in id_test:
    time_test.append(timestamps[id_subject])
    labels_test_details.append(labels_details[id_subject])
    data_details_test.append(data_details[id_subject])
    labels_test_detailed_posture.append(labels_posture_detailed[id_subject])
    data_test_detailed_posture.append(data_detailed_posture[id_subject])
    labels_test_action.append(labels_current_action[id_subject])
    data_test_action.append(data_current_action[id_subject])

# Run the four trained models on their respective test sets
predict_labels, proba = model.test_model(data_test)
predict_labels_details, proba_details = model_details.test_model(data_details_test)
predict_labels_detailed_posture, proba = model_detailed_posture.test_model(
    data_test_detailed_posture)
predict_labels_action, proba = model_action.test_model(data_test_action)

# time, ground_truth, prediction, id_sample_start, id_sample_end = tools.prepare_segment_analysis(time_test, predict_labels, labels_test, id_test)
# predict_labels2 = deepcopy(predict_labels)

labels_final = deepcopy(predict_labels)
list_states_posture_final = []
for i in range(len(predict_labels)):