Example #1
                model = ModelHMM()
                model.train(data_ref, labels_ref, list_features_final,
                            dim_features)

                data_ref = []
                for data in data_test:
                    df = pd.DataFrame(data)
                    df.columns = list_features
                    data_ref.append(df[list_features_final].values)

                pred_labels, proba = model.test_model(data_ref)

                F1_temp = []
                for i in range(len(labels_test)):
                    F1_temp.append(
                        tools.compute_F1_score(labels_test[i], pred_labels[i],
                                               list_states[num_track]))

                F1_score.append(np.mean(F1_temp))
                dim_score.append(dim)
                features_save.append(str(list_features_final))

            score_totaux = pd.DataFrame({
                'nbr_components': dim_score,
                'score': F1_score,
                'features': features_save
            })

            score_totaux.to_csv('score/score_model_wrapper_' + name_track +
                                "2.csv",
                                index=False)
            dim += 1
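
The snippet relies on tools.compute_F1_score(true_labels, pred_labels, states). A minimal sketch of such a helper, assuming scikit-learn's macro-averaged F1 (the project's own implementation may differ):

from sklearn.metrics import f1_score

def compute_F1_score(true_labels, pred_labels, list_states):
    # Macro-averaged F1 over the given state set, for a single test sequence.
    return f1_score(true_labels, pred_labels, labels=list_states, average='macro')
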
Example #2
                time_test = []
                for id_subject in id_test:
                    time_test.append(timestamps[id_subject])

                for i in range(len(labels_test)):
                    conf_mat = tools.compute_confusion_matrix(
                        predict_labels_fisher[i], labels_test[i], list_states)
                    confusion_matrix_fisher += conf_mat

                    conf_mat = tools.compute_confusion_matrix(
                        predict_labels_wrapper[i], labels_test[i], list_states)
                    confusion_matrix_wrapper += conf_mat

                    F1_fisher.append(
                        tools.compute_F1_score(labels_test[i],
                                               predict_labels_fisher[i],
                                               list_states))
                    F1_wrapper.append(
                        tools.compute_F1_score(labels_test[i],
                                               predict_labels_wrapper[i],
                                               list_states))
                    total += 1

                F1_f.append(np.mean(F1_fisher))
                F1_w.append(np.mean(F1_wrapper))

                F1_fisher_temp += np.mean(F1_fisher)
                F1_wrapper_temp += np.mean(F1_wrapper)

                index_nbr_f.append(nbr_features - 1)
                index_nbr_f.append(nbr_features - 1)
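
tools.compute_confusion_matrix is called above with the prediction first and the reference second, and each per-sequence matrix is summed into a running total. A minimal sketch under those assumptions, using scikit-learn's confusion_matrix:

from sklearn.metrics import confusion_matrix

def compute_confusion_matrix(pred_labels, true_labels, list_states):
    # Confusion matrix for one sequence, rows/columns ordered by list_states.
    return confusion_matrix(true_labels, pred_labels, labels=list_states)

# The running totals above would start from zero matrices, e.g.:
# confusion_matrix_fisher = np.zeros((len(list_states), len(list_states)), dtype=int)
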
						for data in data_ref_all[nbr_test]:
							df_data = pd.DataFrame(data, columns = list_features)
							data_ref.append(df_data[sub_list_features].values)

						for data in data_test_all[nbr_test]:
							df_data = pd.DataFrame(data, columns = list_features)
							data_test.append(df_data[sub_list_features].values)

						model = ModelHMM()
						model.train(data_ref, labels_ref[num_track][nbr_test], sub_list_features, np.ones(len(sub_list_features)))

						#### Test
						predict_labels, proba = model.test_model(data_test)
				
						for i in range(len(predict_labels)):
							F1_score.append(tools.compute_F1_score(labels_test[num_track][nbr_test][i], predict_labels[i], list_states[num_track]))

					F1_S[num_track] = np.mean(F1_score)
					score_total[num_track].append(F1_S[num_track])

			score_totaux = pd.DataFrame(
				{'best_features': best_features_total})

			for name_track, num_track in zip(tracks, range(len(tracks))):
				df_track = pd.DataFrame(
					{ name_track: score_total[num_track]})
				score_totaux = pd.concat([score_totaux, df_track], axis=1)

			score_totaux = ranking_features(score_totaux, tracks, method_sort)

			score_totaux.to_csv(path_save + '/' + file_name + str(iteration+1), index=False)
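
ranking_features(score_totaux, tracks, method_sort) is specific to the source project. A hypothetical sketch of one way such a ranking step could work, ordering the feature rows by their score across the per-track columns (the real method_sort options are not documented here):

def ranking_features(score_df, tracks, method_sort='mean'):
    # Rank feature rows by their score across the track columns, best first.
    scores = score_df[list(tracks)]
    key = scores.mean(axis=1) if method_sort == 'mean' else scores.min(axis=1)
    order = key.sort_values(ascending=False).index
    return score_df.loc[order].reset_index(drop=True)
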
			# ax.grid()

			for id_train in range(len(data_ref)):
				data_ref[id_train] = pca.transform(data_ref[id_train])

			for id_test in range(len(data_test)):
				data_test[id_test] = pca.transform(data_test[id_test])

			model = ModelHMM()
			model.train(data_ref, labels_ref, col, dim_features)

			pred_labels, proba = model.test_model(data_test)

			F1_temp = []
			for i in range(len(labels_test)):
				F1_temp.append(tools.compute_F1_score(labels_test[i], pred_labels[i], list_states))

			F1_score.append(np.mean(F1_temp))
			dim_score.append(n_components)



		score_totaux = pd.DataFrame(
		{'nbr_components': dim_score,
		 'score': F1_score,
		})

		score_totaux.to_csv('score_pca' + '_' + name_track + ".csv", index=False)
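
The PCA loop above assumes an already fitted pca object. A minimal sketch of the missing fitting step, assuming scikit-learn's PCA and the same list-of-sequences layout (fit_and_project is a hypothetical helper name):

import numpy as np
from sklearn.decomposition import PCA

def fit_and_project(train_seqs, test_seqs, n_components):
    # Fit PCA on all training frames stacked together, then project each sequence.
    pca = PCA(n_components=n_components)
    pca.fit(np.concatenate(train_seqs, axis=0))
    train_proj = [pca.transform(seq) for seq in train_seqs]
    test_proj = [pca.transform(seq) for seq in test_seqs]
    return train_proj, test_proj, pca
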


Example #5
                            list_states_posture_final = sorted(
                                list_states_posture_final)

                for i in range(len(predict_labels)):
                    MCC_combined.append(
                        tools.compute_MCC_score(
                            labels_test_detailed_posture[i], labels_final[i],
                            list_states_posture_final))
                    MCC_detailed.append(
                        tools.compute_MCC_score(
                            labels_test_detailed_posture[i],
                            predict_labels_detailed_posture[i],
                            list_states_posture_final))
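                    # Note: MCC_general and MCC_details below actually collect F1 scores
                    # (the calls are to compute_F1_score, not compute_MCC_score).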
                    MCC_general.append(
                        tools.compute_F1_score(labels_test[i],
                                               predict_labels[i],
                                               list_states_posture))
                    MCC_details.append(
                        tools.compute_F1_score(labels_test_details[i],
                                               predict_labels_details[i],
                                               list_states_details))

                    F1_combined.append(
                        tools.compute_F1_score(labels_test_detailed_posture[i],
                                               labels_final[i],
                                               list_states_posture_final))
                    F1_detailed.append(
                        tools.compute_F1_score(
                            labels_test_detailed_posture[i],
                            predict_labels_detailed_posture[i],
                            list_states_posture_final))
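
tools.compute_MCC_score is used above once per test sequence. A hypothetical sketch assuming scikit-learn's matthews_corrcoef; since that function takes no label list, list_states is only used here to drop frames whose reference label falls outside the state set (the project's actual handling may differ):

import numpy as np
from sklearn.metrics import matthews_corrcoef

def compute_MCC_score(true_labels, pred_labels, list_states):
    # Matthews correlation for one sequence, restricted to frames whose
    # reference label belongs to list_states.
    true_labels = np.asarray(true_labels)
    pred_labels = np.asarray(pred_labels)
    keep = np.isin(true_labels, list_states)
    return matthews_corrcoef(true_labels[keep], pred_labels[keep])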