Example #1
    vn_score_tab = []
    vp_score_tab = []

    nu_range = np.logspace(-3, 0, 10)
    gamma_range = np.logspace(-2, 0, 6)

    # for a in a_range:
    for gamma in gamma_range:
        print(gamma)
        for nu in nu_range:
            print(nu)
            cl = sksvm.OneClassSVM(gamma=gamma, nu=nu)
            cl.fit(x_train)
            y_predit = cl.predict(x_test)

            vp_score = tools.partial_score(y_test, y_predit, 1)
            vn_score = tools.partial_score(y_test, y_predit, -1)

            vp_score_tab.append(vp_score)
            vn_score_tab.append(vn_score)

    print1b1 = False
    if print1b1:
        for g_index in range(len(gamma_range)):
            plt.figure()
            plt.semilogx(nu_range,
                         vn_score_tab[g_index * len(nu_range):(g_index + 1) *
                                      len(nu_range)],
                         "--",
                         label=r"Vrai négatif : $\gamma$ = " +
                         str(gamma_range[g_index]),
                         color=tools.colors[g_index])
            plt.semilogx(nu_range,
                         vp_score_tab[g_index * len(nu_range):(g_index + 1) *
                                      len(nu_range)],
                         label=r"Vrai positif : $\gamma$ = " +
                         str(gamma_range[g_index]),
                         color=tools.colors[g_index])
            plt.xlabel(r"Valeur de $\nu$")
            plt.ylabel("Précision")
            plt.legend()
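Every example on this page relies on the project's tools.partial_score(y_true, y_pred, label) helper, whose implementation is not shown (sksvm is presumably sklearn.svm imported under that alias, with np and plt the usual numpy/matplotlib aliases). A minimal sketch of what such a per-class accuracy helper could look like, purely as an assumption about its behaviour:

import numpy as np

def partial_score(y_true, y_pred, label):
    # Hypothetical re-implementation: accuracy restricted to the samples
    # whose true label equals `label` (+1 for inliers, -1 for outliers).
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    mask = (y_true == label)
    if not mask.any():
        return 0.0  # no sample of this class in y_true
    return float(np.mean(y_pred[mask] == label))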
Example #2
	# k_range = [1]
	# k_range = [2, 3, 4, 5, 6]
	k_range = [7, 10, 20, 40, 60, ]

	a_range = [0.01, 0.02, 0.03, 0.05, 0.07, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.8, 9, 9.5, 10, 15, 30, 45, 60, 75, 90, 100]
	
	# for a in a_range:
	for k in k_range:
		print(k)
		cl = K_NN_PC(k=k)
		cl.fit(x_train)
		y_predit = cl.predict_multi_alpha(x_test, a_range)

		for a_index in range(len(a_range)):
			vp_score = tools.partial_score(y_test, y_predit[:, a_index], 1)
			vn_score = tools.partial_score(y_test, y_predit[:, a_index], -1)

			vp_score_tab.append(vp_score)
			vn_score_tab.append(vn_score)

	print1b1 = False
	if print1b1:
		for k_index in range(len(k_range)):
			plt.figure()
			plt.semilogx(a_range, vn_score_tab[k_index * len(a_range) : (k_index + 1) * len(a_range)], "--", label=r"Vrai négatif : k = " + str(k_range[k_index]), color=tools.colors[k_index])
			plt.semilogx(a_range, vp_score_tab[k_index * len(a_range) : (k_index + 1) * len(a_range)], label=r"Vrai positif : k = " + str(k_range[k_index]), color=tools.colors[k_index])
			plt.xlabel(r"Valeur de $\alpha$")
			plt.ylabel("Précision")
			plt.title(r"Évolution de la précision en fonction du nombre de plus proche voisin et du coefficient $\alpha$.", wrap=True)
			plt.legend()
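Both examples above store scores in flat lists ordered by the outer hyperparameter first, then slice them back out per parameter when plotting. The same bookkeeping can be done with a reshape; a small sketch, assuming the lists were filled exactly as in Example #2:

import numpy as np

def scores_as_grid(score_tab, outer_len, inner_len):
    # Turn a flat score list into an (outer_len, inner_len) grid so that
    # row i holds the scores obtained with the i-th outer parameter value.
    return np.asarray(score_tab).reshape(outer_len, inner_len)

# With the names from Example #2 (assumed to be in scope):
# vp_grid = scores_as_grid(vp_score_tab, len(k_range), len(a_range))
# vp_grid[0] equals vp_score_tab[0 * len(a_range):(0 + 1) * len(a_range)]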
Example #3
	def inlier_score(self, data, label):
		pred_label = self.predict(data)
		return tools.partial_score(label, pred_label, 1)
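Example #4 below also calls a cl.outlier_score method that is not shown on this page; by symmetry with inlier_score it presumably restricts the score to the outlier class. A sketch under that assumption:

	def outlier_score(self, data, label):
		# Assumed counterpart of inlier_score: score restricted to the
		# outlier class (true label -1) instead of the inlier class (+1).
		pred_label = self.predict(data)
		return tools.partial_score(label, pred_label, -1)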
Example #4
            # print(cl.score(x_train, y_train))
            # print(cl.score(x_test, y_test))

            # print(tools.partial_score(y_test, y_predit, 1))
            # print(np.sum(y_test[y_test == y_predit] == 1))
            # print(tools.partial_score(y_test, y_predit, -1))

            cl = nearest_neighbours.Nearest_Neighbours(alpha=4)
            cl.fit(x_train, y_train)

            # Function specific to the Nearest_Neighbours classifier
            # cl.visualisation(x_test)

            y_predit = cl.predict(x_test)

            vp_score.append(tools.partial_score(y_test, y_predit, 1))
            vn_score.append(tools.partial_score(y_test, y_predit, -1))

    # print(cl.score(x_test, y_test))

    # Function specific to the Nearest_Neighbours classifier
    # print(cl.outlier_score(x_test, y_test))

    # Function specific to the Nearest_Neighbours classifier
    # print(cl.inlier_score(x_test, y_test))

    # fig, ax = plt.subplots()
    # plt.title("Évolution de la précision en fonction du ratio d'exemple négatif disponible")
    # ax.scatter(vp_score[:nb_essai], vn_score[:nb_essai], color='r', label="Données artificielles type 1", marker="+")
    # ax.scatter(vp_score[nb_essai:], vn_score[nb_essai:], color='b', label="Données artificielles type 2", marker="+")
    # plt.ylim(0.45, 1.05)
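The commented-out block above sketches how the collected scores could be plotted against each other. A runnable version of that scatter, assuming vp_score and vn_score were filled as in Example #4 and that nb_essai is the number of runs per artificial dataset type (the helper name is just for illustration):

import matplotlib.pyplot as plt

def plot_vp_vn_scatter(vp_score, vn_score, nb_essai):
    # One marker per run: true-positive score on x, true-negative score on y,
    # split into the two artificial dataset types used in the example.
    fig, ax = plt.subplots()
    plt.title("Évolution de la précision en fonction du ratio d'exemple négatif disponible")
    ax.scatter(vp_score[:nb_essai], vn_score[:nb_essai], color='r',
               label="Données artificielles type 1", marker="+")
    ax.scatter(vp_score[nb_essai:], vn_score[nb_essai:], color='b',
               label="Données artificielles type 2", marker="+")
    plt.ylim(0.45, 1.05)
    plt.legend()
    plt.show()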