Example #1
    def test_p_y_x_knn(self):
        data = TEST_DATA['p_y_x_KNN']
        p_y_x_expected = data['p_y_x']

        p_y_x = p_y_x_knn(data['y'], data['K'])

        self.assertEqual(np.shape(p_y_x), (40, 4))
        np.testing.assert_almost_equal(p_y_x, p_y_x_expected)
Example #2
    def test_p_y_x_knn(self):
        y = TEST_DATA['p_y_x_KNN']['y']
        K = TEST_DATA['p_y_x_KNN']['K']

        p_y_x_expected = TEST_DATA['p_y_x_KNN']['p_y_x']
        p_y_x = p_y_x_knn(y, K)
        self.assertEqual(np.shape(p_y_x), (40, 4))
        np.testing.assert_almost_equal(p_y_x, p_y_x_expected)
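
Both tests above only compare the result against a stored fixture, so they say little about what p_y_x_knn actually computes. As a point of reference, a minimal sketch is given below; it assumes that y is an N x M matrix of training labels already sorted by distance, that labels are 1-based integers, and that the class count (4 here, matching the expected (40, 4) shape) equals the largest label. None of these assumptions is confirmed by the tests beyond the output shape.

import numpy as np

def p_y_x_knn_sketch(y_sorted, k):
    """Empirical class distribution p(y|x) over the k nearest neighbours.

    y_sorted: N x M matrix of training labels sorted by distance, labels 1..C.
    Returns an N x C matrix whose rows sum to 1.
    """
    classes_no = int(np.max(y_sorted))   # assumes 1-based labels 1..C
    nearest = y_sorted[:, :k]            # keep only the k closest neighbours
    # count how often each class occurs among the k neighbours of every row
    counts = np.stack(
        [(nearest == c).sum(axis=1) for c in range(1, classes_no + 1)], axis=1)
    return counts / k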
Example #3
def predict(x):
    """
    Takes a matrix of N examples stored in the rows of an NxD matrix X and returns an Nx1 vector y,
    where each element is in the range {0, ..., 35} and denotes the character recognized in that example.
    :param x: matrix of shape NxD
    :return: vector of shape Nx1
    """
    learn_data = get_learn_data()
    x_train = learn_data[0]
    y_train = learn_data[1]

    distances_array = hamming_distance(x, x_train)
    sorted_labels = sort_train_labels_knn(distances_array, y_train)
    each_class_probability = p_y_x_knn(sorted_labels, 1)
    # np.argmax returns 0-based indices, so add 1 to map them to class labels
    predicted_vector = np.argmax(each_class_probability, axis=AXIS_COLUMNS) + 1
    return predicted_vector
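
predict relies on helpers that none of these examples define. Purely for illustration, and assuming the feature matrices are dense binary (0/1) NumPy arrays, hamming_distance and sort_train_labels_knn could look like the sketch below; the actual implementations in this project may differ.

import numpy as np

def hamming_distance_sketch(X, X_train):
    """N x M matrix of Hamming distances between binary rows of X and X_train."""
    X = X.astype(int)
    X_train = X_train.astype(int)
    # a position contributes 1 exactly when one matrix has 1 and the other 0
    return X @ (1 - X_train).T + (1 - X) @ X_train.T

def sort_train_labels_knn_sketch(distances, y_train):
    """Sort the training labels row-wise by increasing distance (stable sort)."""
    order = np.argsort(distances, axis=1, kind='mergesort')
    return y_train[order]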
Example #4
def run_training():
    data = load_data()

    # KNN model selection
    k_values = range(1, 201, 2)
    print(
        '\n------------- Selecting the number of neighbours for KNN -------------'
    )
    print(
        '-------------------- Values of k: 1, 3, ..., 200 -----------------------'
    )
    print(
        '--------------------- This may take about 1 min ------------------------'
    )

    error_best, best_k, errors = model_selection_knn(data['Xval'],
                                                     data['Xtrain'],
                                                     data['yval'],
                                                     data['ytrain'], k_values)
    print('Best k: {num1} and best error: {num2:.4f}'.format(
        num1=best_k, num2=error_best))
    print('\n--- Press a key to continue ---')
    classification_KNN_vs_no_neighbours(k_values, errors)
    a_values = [1, 3, 10, 30, 100, 300, 1000]
    b_values = [1, 3, 10, 30, 100, 300, 1000]

    print(
        '\n----------------- Selecting parameters a and b for NB --------------------'
    )
    print(
        '--------- Values of a and b: 1, 3, 10, 30, 100, 300, 1000 -----------------'
    )
    print(
        '--------------------- This may take about 1 min ------------------------'
    )

    # NB model selection
    error_best, best_a, best_b, errors = model_selection_nb(
        data['Xtrain'], data['Xval'], data['ytrain'], data['yval'], a_values,
        b_values)

    print('Best a: {}, b: {} and best error: {:.4f}'.format(
        best_a, best_b, error_best))
    print('\n--- Press a key to continue ---')
    plot_a_b_errors(errors, a_values, b_values)
    p_x_y = estimate_p_x_y_nb(data['Xtrain'], data['ytrain'], best_a, best_b)

    classes_no = p_x_y.shape[0]
    print(
        '\n------ Visualization of the most popular words for each class ------'
    )
    print(
        '-- These are the words with the highest probability in each class under the NB model --'
    )

    try:
        groupnames = data['groupnames']
        words = {}
        for x in range(classes_no):
            indices = np.argsort(p_x_y[x, :])[::-1][:50]
            words[groupnames[x]] = {
                word: prob
                for word, prob in zip(data['wordlist'][indices], p_x_y[
                    x, indices])
            }
        word_clouds(words.values(), words.keys())
    except Exception:
        print('--- A problem occurred with the wordcloud library ---')

    print('\n--- Press a key to continue ---')

    print(
        '\n---------------- Comparison of KNN and NB errors ---------------------'
    )

    Dist = hamming_distance(data['Xtest'], data['Xtrain'])
    y_sorted = sort_train_labels_knn(Dist, data['ytrain'])
    p_y_x = p_y_x_knn(y_sorted, best_k)
    error_KNN = classification_error(p_y_x, data['ytest'])

    p_y = estimate_a_priori_nb(data['ytrain'])
    p_y_x = p_y_x_nb(p_y, p_x_y, data['Xtest'])
    error_NB = classification_error(p_y_x, data['ytest'])

    plot_error_NB_KNN(error_NB, error_KNN)
    print('\n--- Press a key to continue ---')
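
For context, the model_selection_knn call near the top of this example is usually just a loop over k_values that reuses the functions shown elsewhere in this section. A minimal sketch under that assumption (the argument order is copied from the call above; hamming_distance, sort_train_labels_knn, p_y_x_knn and classification_error are assumed to come from the same module, and the project's real implementation is not shown here):

import numpy as np

def model_selection_knn_sketch(X_val, X_train, y_val, y_train, k_values):
    """Return (best error, best k, list of errors), one error per value of k."""
    distances = hamming_distance(X_val, X_train)
    y_sorted = sort_train_labels_knn(distances, y_train)
    errors = [classification_error(p_y_x_knn(y_sorted, k), y_val)
              for k in k_values]
    best = int(np.argmin(errors))
    return errors[best], k_values[best], errors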
Example #5
File: main.py  Project: kaaboaye/modele
def run_training():
    data = load_data()

    # KNN model selection
    k_values = range(1, 201, 2)
    print('\n------------- Model selection for KNN -------------')
    print(
        '-------------------- Values k: 1, 3, ..., 200 -----------------------'
    )
    print(
        '--------------------- Calculation may take up to 1 min ------------------------'
    )

    error_best, best_k, errors = model_selection_knn(data['Xval'],
                                                     data['Xtrain'],
                                                     data['yval'],
                                                     data['ytrain'], k_values)
    print('The best k: {num1} and the best error: {num2:.4f}'.format(
        num1=best_k, num2=error_best))
    print('\n--- Press any key to continue ---')
    classification_KNN_vs_no_neighbours(k_values, errors)
    a_values = [1, 3, 10, 30, 100, 300, 1000]
    b_values = [1, 3, 10, 30, 100, 300, 1000]

    print(
        '\n----------------- Model selection for a and b --------------------')
    print(
        '--------- Values a and b: 1, 3, 10, 30, 100, 300, 1000 -----------------'
    )
    print(
        '--------------------- Calculation may take up to 1 min ------------------------'
    )

    # NB model selection
    error_best, best_a, best_b, errors = model_selection_nb(
        data['Xtrain'], data['Xval'], data['ytrain'], data['yval'], a_values,
        b_values)

    print('The best a: {}, b: {} and the best error: {:.4f}'.format(
        best_a, best_b, error_best))
    print('\n--- Press any key to continue ---')
    plot_a_b_errors(errors, a_values, b_values)
    p_x_y = estimate_p_x_y_nb(data['Xtrain'], data['ytrain'], best_a, best_b)

    classes_no = p_x_y.shape[0]
    print('\n------ Visualization of most popular words for each class ------')
    print(
        '-- These are the words that are most probable for each class under the NB model --'
    )

    try:
        groupnames = data['groupnames']
        words = {}
        for x in range(classes_no):
            indices = np.argsort(p_x_y[x, :])[::-1][:50]
            words[groupnames[x]] = {
                word: prob
                for word, prob in zip(data['wordlist'][indices], p_x_y[
                    x, indices])
            }
        word_clouds(words.values(), words.keys())
    except Exception:
        print('--- A problem with wordcloud library --- ')

    print('\n--- Press any key to continue ---')

    print(
        '\n---------------- Comparison of KNN and NB errors ---------------------'
    )

    Dist = hamming_distance(data['Xtest'], data['Xtrain'])
    y_sorted = sort_train_labels_knn(Dist, data['ytrain'])
    p_y_x = p_y_x_knn(y_sorted, best_k)
    error_KNN = classification_error(p_y_x, data['ytest'])

    p_y = estimate_a_priori_nb(data['ytrain'])
    p_y_x = p_y_x_nb(p_y, p_x_y, data['Xtest'])
    error_NB = classification_error(p_y_x, data['ytest'])

    plot_error_NB_KNN(error_NB, error_KNN)
    print('\n--- Press any key to continue ---')
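
The final KNN-vs-NB comparison hinges on classification_error, which is not shown in any of these examples. One plausible definition, assuming p_y_x holds one row of class probabilities per sample and that labels are 1-based (as the +1 in Example #3 suggests), is:

import numpy as np

def classification_error_sketch(p_y_x, y_true):
    """Fraction of samples whose most probable class differs from the true label."""
    predicted = np.argmax(p_y_x, axis=1) + 1   # assumes 1-based class labels
    return np.mean(predicted != y_true)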
Example #6
    def test_p_y_x_knn(self):
        data = test_data['p_y_x_KNN']
        out = p_y_x_knn(data['y'], data['K'])
        max_diff = np.max(np.abs(data['p_y_x'] - out))
        self.assertAlmostEqual(max_diff, 0, 8)