def cv_knn(N_split, X_train, y_train, X_test, y_test, K_values):
    """Pick the best K for the Knn classifier via N_split-fold cross-validation.

    The train and test sets are pooled, split into ``N_split`` equal folds
    (``np.split`` requires the pooled length to divide evenly by ``N_split``),
    and each candidate ``K`` is scored by its mean 'mean_accuracy' over the
    folds.

    Parameters:
        N_split:  number of cross-validation folds.
        X_train, y_train, X_test, y_test:  data arrays; pooled before splitting.
        K_values: iterable of candidate K values to evaluate.

    Returns:
        The K value with the highest mean cross-validated accuracy.
        Also prints the winning ``(K, score)`` pair.
    """
    X = np.concatenate((X_train, X_test), axis=0)
    y = np.concatenate((y_train, y_test), axis=0)

    # Equal-size folds (raises if len(X) is not divisible by N_split).
    X_splits = np.split(X, N_split)
    y_splits = np.split(y, N_split)

    # Grid search over K.
    scores = {}
    for K in K_values:
        fold_accuracies = []
        for i in range(N_split):
            # Fold i is held out; train on the concatenation of the others.
            # (List comprehension instead of np.delete: avoids stacking the
            # fold list into a temporary ndarray, and does not clobber the
            # function's own X_train/y_train/X_test/y_test parameters.)
            X_fit = np.concatenate([X_splits[j] for j in range(N_split) if j != i])
            y_fit = np.concatenate([y_splits[j] for j in range(N_split) if j != i])

            model = Knn(K=K)
            model.train(X_fit, y_fit)
            evaluation = model.evaluate(X_splits[i], y_splits[i])
            fold_accuracies.append(evaluation['mean_accuracy'])
        scores[K] = np.mean(fold_accuracies)

    # Select (and report) the best-scoring K — computed once, not twice.
    best = max(scores.items(), key=operator.itemgetter(1))
    print(best)
    return best[0]
clf_Knn_abalone.train(X_train_abalone, y_train_abalone) """ Après avoir fait l'entrainement, évaluez votre modèle sur les données d'entrainement. IMPORTANT : Vous devez afficher ici avec la commande print() de python, - la matrice de confusion (confusion matrix) - l'accuracy - la précision (precision) - le rappel (recall) - le F1-score """ # Tester votre classifieur print('---------- IRIS TRAIN ----------') evaluate_train = clf_Knn_iris.evaluate(X_train_iris, y_train_iris) for e in evaluate_train: print(f'{e}\n {evaluate_train[e]}') print('---------- WINE TRAIN ----------') evaluate_train = clf_Knn_wine.evaluate(X_train_wine, y_train_wine) for e in evaluate_train: print(f'{e}\n {evaluate_train[e]}') print('---------- ABALONE TRAIN ----------') evaluate_train = clf_Knn_abalone.evaluate(X_train_abalone, y_train_abalone) for e in evaluate_train: print(f'{e}\n {evaluate_train[e]}') """ Finalement, évaluez votre modèle sur les données de test. IMPORTANT :