def test_classification_error(self):
    """classification_error should return a scalar equal to the reference value."""
    fixture = TEST_DATA['error_fun']
    expected = fixture['error_val']
    result = classification_error(fixture['p_y_x'], fixture['y_true'])
    # the error must be a single scalar, not an array
    self.assertEqual(np.size(result), 1)
    self.assertAlmostEqual(result, expected)
def test_classification_error(self):
    """classification_error should return a scalar equal to the reference value."""
    case = TEST_DATA['error_fun']
    p_y_x, y_true = case['p_y_x'], case['y_true']
    computed = classification_error(p_y_x, y_true)
    # a single scalar error is expected
    self.assertEqual(np.size(computed), 1)
    self.assertAlmostEqual(computed, case['error_val'])
def run_training():
    """Select hyperparameters for KNN and Naive Bayes, visualize the results,
    and compare both models' classification errors on the test set.

    Relies on project helpers (load_data, model_selection_knn, model_selection_nb,
    plotting utilities) being importable in this module's scope.
    """
    data = load_data()

    # --- KNN: pick the number of neighbours on the validation set ---
    k_values = range(1, 201, 2)
    print('\n------------- Selekcja liczby sasiadow dla modelu dla KNN -------------')
    print('-------------------- Wartosci k: 1, 3, ..., 200 -----------------------')
    print('--------------------- To moze potrwac ok. 1 min ------------------------')
    error_best, best_k, errors = model_selection_knn(
        data['Xval'], data['Xtrain'], data['yval'], data['ytrain'], k_values)
    print('Najlepsze k: {num1} i najlepszy blad: {num2:.4f}'.format(
        num1=best_k, num2=error_best))
    print('\n--- Wcisnij klawisz, aby kontynuowac ---')
    classification_KNN_vs_no_neighbours(k_values, errors)

    # --- NB: grid-search the smoothing parameters a and b ---
    a_values = [1, 3, 10, 30, 100, 300, 1000]
    b_values = [1, 3, 10, 30, 100, 300, 1000]
    print('\n----------------- Selekcja parametrow a i b dla NB --------------------')
    print('--------- Wartosci a i b: 1, 3, 10, 30, 100, 300, 1000 -----------------')
    print('--------------------- To moze potrwac ok. 1 min ------------------------')
    error_best, best_a, best_b, errors = model_selection_nb(
        data['Xtrain'], data['Xval'], data['ytrain'], data['yval'],
        a_values, b_values)
    print('Najlepsze a: {}, b: {} i najlepszy blad: {:.4f}'.format(
        best_a, best_b, error_best))
    print('\n--- Wcisnij klawisz, aby kontynuowac ---')
    plot_a_b_errors(errors, a_values, b_values)

    # --- Word clouds of the 50 most probable words per class ---
    p_x_y = estimate_p_x_y_nb(data['Xtrain'], data['ytrain'], best_a, best_b)
    classes_no = p_x_y.shape[0]
    print('\n------Wizualizacja najbardziej popularnych slow dla poszczegolnych klas------')
    print('--Sa to slowa o najwyzszym prawdopodobienstwie w danej klasie dla modelu NB--')
    try:
        groupnames = data['groupnames']
        words = {}
        for cls in range(classes_no):
            # indices of the 50 highest-probability words for this class
            top = np.argsort(p_x_y[cls, :])[::-1][:50]
            words[groupnames[cls]] = dict(zip(data['wordlist'][top],
                                              p_x_y[cls, top]))
        word_clouds(words.values(), words.keys())
    except Exception:
        # best-effort visualization: wordcloud may be unavailable
        print('---Wystapil problem z biblioteka wordcloud--- ')
    print('\n--- Wcisnij klawisz, aby kontynuowac ---')

    # --- Final comparison of KNN and NB on the held-out test set ---
    print('\n----------------Porownanie bledow dla KNN i NB---------------------')
    Dist = hamming_distance(data['Xtest'], data['Xtrain'])
    y_sorted = sort_train_labels_knn(Dist, data['ytrain'])
    p_y_x = p_y_x_knn(y_sorted, best_k)
    error_KNN = classification_error(p_y_x, data['ytest'])
    p_y = estimate_a_priori_nb(data['ytrain'])
    p_y_x = p_y_x_nb(p_y, p_x_y, data['Xtest'])
    error_NB = classification_error(p_y_x, data['ytest'])
    plot_error_NB_KNN(error_NB, error_KNN)
    print('\n--- Wcisnij klawisz, aby kontynuowac ---')
def run_training():
    """Select hyperparameters for KNN and Naive Bayes, visualize the results,
    and compare both models' classification errors on the test set.

    Relies on project helpers (load_data, model_selection_knn, model_selection_nb,
    plotting utilities) being importable in this module's scope.
    """
    data = load_data()

    # --- KNN: pick the number of neighbours on the validation set ---
    k_values = range(1, 201, 2)
    print('\n------------- Model selection for KNN -------------')
    print('-------------------- Values k: 1, 3, ..., 200 -----------------------')
    print('--------------------- Calculation may take up to 1 min ------------------------')
    error_best, best_k, errors = model_selection_knn(
        data['Xval'], data['Xtrain'], data['yval'], data['ytrain'], k_values)
    print('The best k: {num1} and the best error: {num2:.4f}'.format(
        num1=best_k, num2=error_best))
    print('\n--- Press any key to continue ---')
    classification_KNN_vs_no_neighbours(k_values, errors)

    # --- NB: grid-search the smoothing parameters a and b ---
    a_values = [1, 3, 10, 30, 100, 300, 1000]
    b_values = [1, 3, 10, 30, 100, 300, 1000]
    print('\n----------------- Model selection for a and b --------------------')
    print('--------- Values a and b: 1, 3, 10, 30, 100, 300, 1000 -----------------')
    print('--------------------- Calculation may take up to 1 min ------------------------')
    error_best, best_a, best_b, errors = model_selection_nb(
        data['Xtrain'], data['Xval'], data['ytrain'], data['yval'],
        a_values, b_values)
    print('The best a: {}, b: {} and the best error: {:.4f}'.format(
        best_a, best_b, error_best))
    print('\n--- Press any key to continue ---')
    plot_a_b_errors(errors, a_values, b_values)

    # --- Word clouds of the 50 most probable words per class ---
    p_x_y = estimate_p_x_y_nb(data['Xtrain'], data['ytrain'], best_a, best_b)
    classes_no = p_x_y.shape[0]
    print('\n------ Visualization of most popular words for each class ------')
    print('-- These are words that are most probable for each class and NB model --')
    try:
        groupnames = data['groupnames']
        words = {}
        for cls in range(classes_no):
            # indices of the 50 highest-probability words for this class
            top = np.argsort(p_x_y[cls, :])[::-1][:50]
            words[groupnames[cls]] = dict(zip(data['wordlist'][top],
                                              p_x_y[cls, top]))
        word_clouds(words.values(), words.keys())
    except Exception:
        # best-effort visualization: wordcloud may be unavailable
        print('--- A problem with wordcloud library --- ')
    print('\n--- Press any key to continue ---')

    # --- Final comparison of KNN and NB on the held-out test set ---
    print('\n---------------- Comparison of KNN and NB errors ---------------------')
    Dist = hamming_distance(data['Xtest'], data['Xtrain'])
    y_sorted = sort_train_labels_knn(Dist, data['ytrain'])
    p_y_x = p_y_x_knn(y_sorted, best_k)
    error_KNN = classification_error(p_y_x, data['ytest'])
    p_y = estimate_a_priori_nb(data['ytrain'])
    p_y_x = p_y_x_nb(p_y, p_x_y, data['Xtest'])
    error_NB = classification_error(p_y_x, data['ytest'])
    plot_error_NB_KNN(error_NB, error_KNN)
    print('\n--- Press any key to continue ---')
def test_classification_error(self):
    """classification_error should match the reference value to 8 decimal places."""
    fixture = test_data['error_fun']
    actual = classification_error(fixture['p_y_x'], fixture['y_true'])
    self.assertAlmostEqual(actual, fixture['error_val'], 8)