Example #1
        F1_score = []
        dim_score = []
        features_save = []

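        # Path to the saved wrapper feature-selection results for this track and dimension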
        dim = 11
        file_wrapper = path_wrapper + 'wrapper_' + name_track + ".csv_" + str(
            dim)
        # for dim in range(1, 11):
        while (os.path.isfile(file_wrapper)):
            print(file_wrapper)
            list_features_final = get_best_features(file_wrapper)[0]
            dim_features = np.ones(len(list_features_final))

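            # Repeat the random train/test split nbr_cross_val times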
            for n_iter in range(nbr_cross_val):
                data_ref1, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                    data_win2, real_labels[num_track], ratio)

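                # Keep only the wrapper-selected features in each training sequence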
                data_ref = []
                for data in data_ref1:
                    df = pd.DataFrame(data)
                    df.columns = list_features
                    data_ref.append(df[list_features_final].values)

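                # Train an HMM on the reduced training sequences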
                model = ModelHMM()
                model.train(data_ref, labels_ref, list_features_final,
                            dim_features)

                # Keep only the wrapper-selected features in each test sequence
                data_ref = []
                for data in data_test:
                    df = pd.DataFrame(data)
                    df.columns = list_features
                    data_ref.append(df[list_features_final].values)

                predict_labels, proba = model.test_model(data_ref)
Example #2
                        labels_gen.append(real_labels[i])

                        count.append(i)

                del data_reduce[count[0]:count[-1] + 1]
                del labels_reduce[count[0]:count[-1] + 1]

            else:
                n_subject = len(list_participant)

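            # Accumulators for the F1 scores of the wrapper- and Fisher-selected feature sets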
            F1_wrapper_temp = 0
            F1_fisher_temp = 0

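            # Evaluate both feature sets over nbr_cross_val random train/test splits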
            for nbr_test in range(nbr_cross_val):
                total = 0
                data_ref, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                    data_reduce, labels_reduce, ratio)

                data_fisher = []
                data_wrapper = []

                data_win = deepcopy(data_win2)

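                # Restrict each training subject's windows to the Fisher- and wrapper-selected features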
                for id_subject in id_train:
                    df = pd.DataFrame(data_win2[id_subject])
                    df.columns = list_features

                    data_fisher.append(df[best_features_fisher].values)
                    data_wrapper.append(df[best_features_wrapper].values)

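                # Train an HMM on the Fisher-selected features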
                model_fisher = ModelHMM()
                model_fisher.train(data_fisher, labels_ref,
                                   best_features_fisher,
                                   np.ones(len(best_features_fisher)))
        ###################################################

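                # Keep only the current feature subset in every windowed sequence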
                data_win = []

                for j in range(len(df_all_data)):
                    data_win.append(df_all_data[j][sub_list_features].values)

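                # Accumulators for the F1 score, Matthews correlation coefficient and confusion matrix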
                F1_S = 0
                MCC = 0

                confusion_matrix = np.zeros(
                    (len(list_states), len(list_states)))
                for nbr_test in range(nbr_cross_val):

                    data_ref, labels_ref, data_test, labels_test, id_train, id_test = tools.split_data_base2(
                        data_win, real_labels, ratio)

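                    # Train an HMM on the reference split with the candidate feature subset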
                    model = ModelHMM()
                    model.train(data_ref, labels_ref, sub_list_features,
                                np.ones(len(sub_list_features)))

                    #### Test
                    time_test = []
                    for id_subject in id_test:
                        time_test.append(timestamps[id_subject])

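                    # Predicted label sequences and their probabilities for the held-out subjects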
                    predict_labels, proba = model.test_model(data_test)

                    for i in range(len(predict_labels)):
                        conf_mat = tools.compute_confusion_matrix(
                            predict_labels[i], labels_test[i], list_states)
        id_train = [[]]
        id_test = [[]]

        best_features_total = []
        score_total = [[]]

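        # Make sure score_total holds one list per track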
        for i in range(len(tracks)):
            if (i < len(tracks) - 1):
                score_total.append([])

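        # For each fold, split the subjects once (on the first track) and reuse the same split for the other tracks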
        for k in range(nbr_cross_val):

            for num_track in range(len(tracks)):
                if (num_track == 0):
                    (data_ref_all[k], labels_ref[num_track][k],
                     data_test_all[k], labels_test[num_track][k],
                     id_train[k], id_test[k]) = tools.split_data_base2(
                         data_win2, real_labels[num_track], ratio)
                else:
                    for id_subject in id_train[k]:
                        labels_ref[num_track][k].append(
                            real_labels[num_track][id_subject])

                    for id_subject in id_test[k]:
                        labels_test[num_track][k].append(
                            real_labels[num_track][id_subject])

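                # Grow the per-track / per-fold label containers as needed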
                if (len(labels_ref) < len(tracks)):
                    labels_ref.append([[]])
                    labels_test.append([[]])

                if (k < nbr_cross_val - 1):
                    labels_ref[num_track].append([])