Example #1
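Cross-validated evaluation of a ModelHMM on wrapper-selected features: each windowed sequence is reduced to the selected feature columns before training and testing, and per-sequence F1 scores are computed on the test split. The tail of the fragment repeats the evaluation on PCA-projected data while scanning the number of components.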
            print(file_wrapper)
            list_features_final = get_best_features(file_wrapper)[0]
            dim_features = np.ones(len(list_features_final))

            for n_iter in range(nbr_cross_val):
                data_ref1, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                    data_win2, real_labels[num_track], ratio)

                # Keep only the selected features in the training sequences
                data_ref = []
                for data in data_ref1:
                    df = pd.DataFrame(data, columns=list_features)
                    data_ref.append(df[list_features_final].values)

                model = ModelHMM()
                model.train(data_ref, labels_ref, list_features_final,
                            dim_features)

                # Apply the same feature reduction to the test sequences
                data_test_reduce = []
                for data in data_test:
                    df = pd.DataFrame(data, columns=list_features)
                    data_test_reduce.append(df[list_features_final].values)

                pred_labels, proba = model.test_model(data_test_reduce)

                # Per-sequence F1 score against the ground-truth labels
                F1_temp = []
                for i in range(len(labels_test)):
                    F1_temp.append(
                        tools.compute_F1_score(labels_test[i], pred_labels[i],
                                               list_states[num_track]))
            #     indicesToKeep = finalDf['state'] == target
            #     ax.scatter(finalDf.loc[indicesToKeep, 'pc1']
            #                , finalDf.loc[indicesToKeep, 'pc2']
            #                , finalDf.loc[indicesToKeep, 'pc3']
            #                , s=50)
            # ax.legend(list_states)
            # ax.grid()

            # Project the training and test sequences onto the PCA components
            for id_train in range(len(data_ref)):
                data_ref[id_train] = pca.transform(data_ref[id_train])

            for id_test in range(len(data_test)):
                data_test[id_test] = pca.transform(data_test[id_test])

            model = ModelHMM()
            model.train(data_ref, labels_ref, col, dim_features)

            pred_labels, proba = model.test_model(data_test)

            F1_temp = []
            for i in range(len(labels_test)):
                F1_temp.append(tools.compute_F1_score(labels_test[i], pred_labels[i], list_states))

            F1_score.append(np.mean(F1_temp))
            dim_score.append(n_components)



        score_totaux = pd.DataFrame(
            {'nbr_components': dim_score,
             'score': F1_score})
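The tail of this fragment sweeps PCA dimensionalities and records a mean F1 score per component count. Below is a minimal, self-contained sketch of that pattern; it substitutes a scikit-learn classifier for ModelHMM and synthetic arrays for the motion data, so every name in it (classifier, data, split) is an illustrative assumption rather than the original API.

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 10))        # stand-in for the windowed feature data
y = rng.integers(0, 3, size=300)      # stand-in for the per-window state labels

dim_score, F1_score = [], []
for n_components in range(1, X.shape[1] + 1):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=n_components)

    # Fit PCA on the training split only, then project both splits
    pca = PCA(n_components=n_components).fit(X_train)
    clf = LogisticRegression(max_iter=1000).fit(pca.transform(X_train), y_train)

    pred = clf.predict(pca.transform(X_test))
    F1_score.append(f1_score(y_test, pred, average='macro'))
    dim_score.append(n_components)

score_totaux = pd.DataFrame({'nbr_components': dim_score, 'score': F1_score})
print(score_totaux)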
Example #3
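Leave-one-subject-out evaluation for load-handling recognition: one participant's sequences are withheld to test generalisation, repeated random splits feed tools.split_data_base2, and confusion matrices, MCC, and per-sequence F1 scores are accumulated for Fisher-ranked and wrapper-selected feature subsets.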
        short = 0

        transition_error = []
        short_transition_error = 0

        MCC = 0
        F1_fisher = []
        F1_wrapper = []

        F1_f = []
        F1_w = []

        if (nbr_cross_val == 0):
            model = ModelHMM()
            model.train(data_win, real_labels, best_features, dim_features)

            if (save):
                model.save_model(path_model, name_model, "load_handling")

        # Hold out each participant in turn to test generalisation
        for n_subject in range(len(list_participant)):
            data_reduce = deepcopy(data_win)
            labels_reduce = deepcopy(real_labels)

            if (test_generalisation):
                data_gen = []
                labels_gen = []
                seq_subject = 0
                count = []
                for i in range(len(info_participant)):
                    if (info_participant[i] == list_participant[n_subject]):
                for j in range(len(df_all_data)):
                    data_win.append(df_all_data[j][sub_list_features].values)

                F1_S = 0
                MCC = 0

                confusion_matrix = np.zeros(
                    (len(list_states), len(list_states)))
                for nbr_test in range(nbr_cross_val):

                    data_ref, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                        data_win, real_labels, ratio)

                    model = ModelHMM()
                    model.train(data_ref, labels_ref, sub_list_features,
                                np.ones(len(sub_list_features)))

                    #### Test
                    time_test = []
                    for id_subject in id_test:
                        time_test.append(timestamps[id_subject])

                    predict_labels, proba = model.test_model(data_test)

                    for i in range(len(predict_labels)):
                        conf_mat = tools.compute_confusion_matrix(
                            predict_labels[i], labels_test[i], list_states)
                        confusion_matrix += conf_mat
                        MCC += tools.compute_MCC_score(
                            predict_labels[i], labels_test[i],
                            list_states) / len(predict_labels)
        data_train, labels_train, data_test, labels_test, id_train, id_test = tools.split_data_base2(
            data_win, real_labels[num_track], ratio_split)

        # Keep only the data related to the final list of features
        train_set = tools.reduce_data_to_features(data_train, list_features,
                                                  list_features_final)
        test_set = tools.reduce_data_to_features(data_test, list_features,
                                                 list_features_final)
        dim_features = np.ones(len(list_features_final))

        print('DEBUG list of final features ', list_features_final)
        plt.plot(train_set[0][:, 3])
        plt.show()

        # Training the model
        model = ModelHMM()
        model.train(train_set, labels_train, list_features_final, dim_features)

        # Testing the model
        pred_labels, proba = model.test_model(test_set)

        # debug (sere): write pred_labels and proba to a file

        F1_temp = []
        for i in range(len(labels_test)):
            F1_temp.append(
                tools.compute_F1_score(labels_test[i], pred_labels[i],
                                       list_states[num_track]))

        F1_score.append(np.mean(F1_temp))
                    'score': sorted_score,
                })

                best_features = sorted_features_fisher[0:n_components]
                dim_features = np.ones(len(best_features))

                # print(sorted_features_fisher)

                data_reduce = []
                for data in data_ref:
                    df = pd.DataFrame(data)
                    df.columns = list_features
                    data_reduce.append(df[best_features].values)

                model = ModelHMM()
                model.train(data_reduce, labels_ref, best_features,
                            dim_features)

                data_reduce = []
                for data in data_test:
                    df = pd.DataFrame(data)
                    df.columns = list_features
                    data_reduce.append(df[best_features].values)

                pred_labels, proba = model.test_model(data_reduce)

                F1_temp = []
                for i in range(len(labels_test)):
                    F1_temp.append(
                        tools.compute_F1_score(labels_test[i], pred_labels[i],
                                               list_states[num_track]))
                    for nbr_test in range(nbr_cross_val):

                        # Restrict both splits to the candidate feature subset
                        data_ref = []
                        data_test = []

                        for data in data_ref_all[nbr_test]:
                            df_data = pd.DataFrame(data, columns=list_features)
                            data_ref.append(df_data[sub_list_features].values)

                        for data in data_test_all[nbr_test]:
                            df_data = pd.DataFrame(data, columns=list_features)
                            data_test.append(df_data[sub_list_features].values)

                        model = ModelHMM()
                        model.train(data_ref, labels_ref[num_track][nbr_test], sub_list_features, np.ones(len(sub_list_features)))

                        #### Test
                        predict_labels, proba = model.test_model(data_test)

                        for i in range(len(predict_labels)):
                            F1_score.append(tools.compute_F1_score(labels_test[num_track][nbr_test][i], predict_labels[i], list_states[num_track]))

                    F1_S[num_track] = np.mean(F1_score)
                    score_total[num_track].append(F1_S[num_track])

            score_totaux = pd.DataFrame(
                {'best_features': best_features_total})

            for num_track, name_track in enumerate(tracks):
                df_track = pd.DataFrame(
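The middle of this fragment ranks features by Fisher score (sorted_features_fisher) and keeps the top n_components before retraining. A small sketch of that ranking step, assuming the score is the usual between-class over within-class variance ratio (the repo's own scoring helper is not shown above); fisher_scores, the toy data, and the feature names are all hypothetical:

import numpy as np

def fisher_scores(X, y):
    """Between-class scatter over within-class scatter, per feature."""
    classes = np.unique(y)
    overall_mean = X.mean(axis=0)
    num = np.zeros(X.shape[1])
    den = np.zeros(X.shape[1])
    for c in classes:
        Xc = X[y == c]
        num += len(Xc) * (Xc.mean(axis=0) - overall_mean) ** 2
        den += len(Xc) * Xc.var(axis=0)
    return num / den

rng = np.random.default_rng(1)
X = rng.normal(size=(200, 6))            # toy feature matrix
y = rng.integers(0, 2, size=200)         # toy state labels
X[y == 1, 0] += 2.0                      # make feature 0 clearly discriminative

list_features = [f'feat_{j}' for j in range(X.shape[1])]
order = np.argsort(fisher_scores(X, y))[::-1]
sorted_features_fisher = [list_features[j] for j in order]
best_features = sorted_features_fisher[0:3]   # top n_components features
print(best_features)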
Example #8
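Leave-one-subject-out preparation over a hierarchy of label levels: the held-out participant's block of sequences is deleted from every annotation level (detailed posture, details, action), then repeated random splits train a posture-level ModelHMM and gather the matching per-level training data.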
            # Drop the held-out participant's block at every annotation level
            del data_reduce_detailed[count[0]:count[-1] + 1]
            del labels_reduce_detailed[count[0]:count[-1] + 1]
            del data_reduce_details[count[0]:count[-1] + 1]
            del labels_reduce_details[count[0]:count[-1] + 1]
            del data_reduce_action[count[0]:count[-1] + 1]
            del labels_reduce_action[count[0]:count[-1] + 1]

        else:
            n_subject = len(list_participant)

        # Repeated random splits on the posture-level data
        for nbr_test in range(nbr_cross_val):
            data_ref, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                data_reduce_posture, labels_reduce_posture, ratio)

            model = ModelHMM()
            model.train(data_ref, labels_ref, best_features_posture,
                        np.ones(len(best_features_posture)))

            labels_ref_details = []
            data_details_ref = []
            labels_ref_detailed_posture = []
            data_ref_detailed_posture = []
            labels_ref_action = []
            data_ref_action = []

            if (test_generalisation):
                for id_subject in id_train:
                    labels_ref_details.append(
                        labels_reduce_details[id_subject])
                    data_details_ref.append(data_reduce_details[id_subject])
                    labels_ref_detailed_posture.append(
                        labels_reduce_detailed[id_subject])
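For the deletion pattern at the top of this fragment (removing one participant's contiguous block of sequences from each training list), here is a compact toy illustration; every list below is a stand-in for the fragment's data_reduce_* structures:

from copy import deepcopy
import numpy as np

# Toy stand-ins: two sequences per participant, stored contiguously
list_participant = ['A', 'B', 'C']
info_participant = ['A', 'A', 'B', 'B', 'C', 'C']    # owner of each sequence
data_win = [np.full((5, 4), i) for i in range(6)]
real_labels = [['standing'] * 5 for _ in range(6)]

for subject in list_participant:
    data_reduce = deepcopy(data_win)
    labels_reduce = deepcopy(real_labels)

    # Indices of the held-out participant's sequences
    count = [i for i, p in enumerate(info_participant) if p == subject]

    # Drop that contiguous block from the training pool, mirroring
    # del data_reduce_...[count[0]:count[-1] + 1] in the fragment above
    del data_reduce[count[0]:count[-1] + 1]
    del labels_reduce[count[0]:count[-1] + 1]

    print(subject, '-> train sequences:', len(data_reduce))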