def test_model_selection_train_err(self):
        x_train = TEST_DATA['ms']['x_train']
        y_train = TEST_DATA['ms']['y_train']
        x_val = TEST_DATA['ms']['x_val']
        y_val = TEST_DATA['ms']['y_val']
        M_values = TEST_DATA['ms']['M_values']
        train_err_expected = TEST_DATA['ms']['train_err']

        _, train_err, _ = model_selection(x_train, y_train, x_val, y_val,
                                          M_values)

        self.assertEqual(np.size(train_err), 1)
        self.assertAlmostEqual(train_err, train_err_expected)
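For orientation, here is a minimal sketch of a regression-flavoured model_selection compatible with this test: it assumes x is an N x 1 column vector, fits one least-squares polynomial per degree in M_values, and returns (w, train_err, val_err) for the model with the lowest validation error. The design_matrix and mean_squared_error helpers are hypothetical names introduced only for this sketch.

import numpy as np

def design_matrix(x, M):
    # Columns x^0 .. x^M for a column vector x (assumed shape (N, 1)).
    return np.concatenate([x ** m for m in range(M + 1)], axis=1)

def mean_squared_error(x, y, w):
    return float(np.mean((design_matrix(x, np.size(w) - 1) @ w - y) ** 2))

def model_selection(x_train, y_train, x_val, y_val, M_values):
    # Fit each degree by ordinary least squares and keep the model
    # with the smallest validation error.
    best = None
    for M in M_values:
        phi = design_matrix(x_train, M)
        w = np.linalg.lstsq(phi, y_train, rcond=None)[0]
        train_err = mean_squared_error(x_train, y_train, w)
        val_err = mean_squared_error(x_val, y_val, w)
        if best is None or val_err < best[2]:
            best = (w, train_err, val_err)
    return best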
Example #2
    def test_model_selection_theta(self):
        x_train = TEST_DATA['ms']['x_train']
        y_train = TEST_DATA['ms']['y_train']
        x_val = TEST_DATA['ms']['x_val']
        y_val = TEST_DATA['ms']['y_val']
        w0 = TEST_DATA['ms']['w0']
        eta = TEST_DATA['ms']['step']
        epochs = TEST_DATA['ms']['epochs']
        mini_batch = TEST_DATA['ms']['mini_batch']
        thetas = TEST_DATA['ms']['thetas']
        lambdas = TEST_DATA['ms']['lambdas']
        theta = TEST_DATA['ms']['theta']

        _, theta_computed, _, _ = model_selection(x_train, y_train, x_val,
                                                  y_val, w0, epochs, eta,
                                                  mini_batch, lambdas, thetas)

        self.assertAlmostEqual(theta, theta_computed, 6)
Example #3
    def test_model_selection_F(self):
        x_train = TEST_DATA['ms']['x_train']
        y_train = TEST_DATA['ms']['y_train']
        x_val = TEST_DATA['ms']['x_val']
        y_val = TEST_DATA['ms']['y_val']
        w0 = TEST_DATA['ms']['w0']
        eta = TEST_DATA['ms']['step']
        epochs = TEST_DATA['ms']['epochs']
        mini_batch = TEST_DATA['ms']['mini_batch']
        thetas = TEST_DATA['ms']['thetas']
        lambdas = TEST_DATA['ms']['lambdas']
        F = TEST_DATA['ms']['F']

        _, _, _, F_computed = model_selection(x_train, y_train, x_val, y_val,
                                              w0, epochs, eta, mini_batch,
                                              lambdas, thetas)

        max_diff = np.max(np.abs(F - F_computed))
        self.assertAlmostEqual(max_diff, 0, 6)
Example #4
    def test_model_selection_F(self):
        x_train = TEST_DATA['ms']['x_train']
        y_train = TEST_DATA['ms']['y_train']
        x_val = TEST_DATA['ms']['x_val']
        y_val = TEST_DATA['ms']['y_val']
        w0 = TEST_DATA['ms']['w0']
        epochs = TEST_DATA['ms']['epochs']
        eta = TEST_DATA['ms']['step']
        mini_batch = TEST_DATA['ms']['mini_batch']
        lambdas = TEST_DATA['ms']['lambdas']
        thetas = TEST_DATA['ms']['thetas']
        F_expected = TEST_DATA['ms']['F']

        _, _, w, F = model_selection(x_train, y_train, x_val, y_val, w0, epochs, eta, mini_batch,
                                     lambdas, thetas)
        self.assertEqual(np.shape(F), (3, 4))
        np.testing.assert_almost_equal(F, F_expected)
Example #5
    def test_model_selection_lambda(self):
        x_train = TEST_DATA['ms']['x_train']
        y_train = TEST_DATA['ms']['y_train']
        x_val = TEST_DATA['ms']['x_val']
        y_val = TEST_DATA['ms']['y_val']
        w0 = TEST_DATA['ms']['w0']
        epochs = TEST_DATA['ms']['epochs']
        eta = TEST_DATA['ms']['step']
        mini_batch = TEST_DATA['ms']['mini_batch']
        lambdas = TEST_DATA['ms']['lambdas']
        thetas = TEST_DATA['ms']['thetas']
        reg_lambda_expected = TEST_DATA['ms']['lambda']

        reg_lambda, _, _, _ = model_selection(x_train, y_train, x_val, y_val, w0, epochs, eta,
                                              mini_batch, lambdas, thetas)

        self.assertEqual(np.size(reg_lambda), 1)
        self.assertAlmostEqual(reg_lambda, reg_lambda_expected)
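Taken together, the tests above fix the classification variant's contract: a grid search over lambdas and thetas that returns (best_lambda, best_theta, best_w, F), where F holds the F-measure for every (lambda, theta) pair. Below is a sketch under those assumptions; stochastic_gradient_descent and regularized_logistic_cost_function appear in the training scripts further down, while sigmoid, f_measure, and the regularization_lambda keyword are hypothetical here.

import functools

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def f_measure(y_true, y_pred):
    # F1 score: harmonic mean of precision and recall.
    tp = np.sum((y_pred == 1) & (y_true == 1))
    fp = np.sum((y_pred == 1) & (y_true == 0))
    fn = np.sum((y_pred == 0) & (y_true == 1))
    denom = 2 * tp + fp + fn
    return 2 * tp / denom if denom > 0 else 0.0

def model_selection(x_train, y_train, x_val, y_val, w0, epochs, eta,
                    mini_batch, lambdas, thetas):
    F = np.zeros((len(lambdas), len(thetas)))
    best = (None, None, None, -1.0)
    for i, lam in enumerate(lambdas):
        # The keyword name is an assumption about the cost function's API.
        cost = functools.partial(regularized_logistic_cost_function,
                                 regularization_lambda=lam)
        w, _ = stochastic_gradient_descent(cost, x_train, y_train, w0,
                                           epochs, eta, mini_batch)
        p = sigmoid(x_val @ w)  # predicted probabilities on validation data
        for j, theta in enumerate(thetas):
            F[i, j] = f_measure(y_val, (p >= theta).astype(int))
            if F[i, j] > best[3]:
                best = (lam, theta, w, F[i, j])
    return best[0], best[1], best[2], F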
Example #6
    plt.waitforbuttonpress(0)

    # Model selection
    print(
        '\n--- Model selection for the linear least squares problem ---'
    )
    print(
        '---------------- Polynomial models of degree M=0,...,7 ----------------'
    )
    print(
        '- Number of training points N=50. Number of validation points N=20 -'
    )

    M_values = range(0, 8)  # degrees M=0,...,7, as announced above
    w, train_err, val_err = model_selection(data['x_train_50'],
                                            data['y_train_50'],
                                            data['x_val_20'], data['y_val_20'],
                                            M_values)
    M = np.shape(w)[0] - 1
    y_model = polynomial(x_plot, w)

    fig = plt.figure(figsize=(6, 5), num='Model selection for M')
    sub = fig.add_subplot(1, 1, 1)
    sub.set_title('Best M={}'.format(M))
    plot_model(data['x_train_50'], data['y_train_50'], x_plot, y_obj, y_model,
               data['x_val_20'], data['y_val_20'], train_err, val_err)

    plt.tight_layout()
    plt.draw()
    print('\n--- Press any key to continue ---')
    plt.waitforbuttonpress(0)
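The demo above also depends on a polynomial(x, w) evaluator; under the same column-vector convention as the earlier model_selection sketch, a plausible one-liner (hypothetical, for illustration):

def polynomial(x, w):
    # Evaluate sum_m w[m] * x^m; weights assumed ordered from degree 0 up.
    return np.concatenate([x ** m for m in range(np.size(w))], axis=1) @ w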
Example #7
def run_training():
    data = load_data()

    print(
        '---------- Training logistic regression with gradient descent --------'
    )
    print(
        '------------------ This may take about 1 min -----------------------------'
    )

    eta = 0.1
    theta = 0.65
    lambdas = [0, 0.00001, 0.0001, 0.001, 0.01, 0.1]
    thetas = list(np.arange(0.1, 0.9, 0.05))

    log_cost_for_data = functools.partial(logistic_cost_function,
                                          x_train=data['x_train'],
                                          y_train=data['y_train'])
    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed1, f_values1 = gradient_descent(log_cost_for_data, w_0, EPOCHS,
                                              eta)

    print('Final objective function value: {:.4f}'.format(f_values1[-1][0]))

    print(
        '\n------- Training logistic regression with stochastic gradient descent -----'
    )
    print(
        '------------------ This may take about 1 min -----------------------------'
    )

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed2, f_values2 = stochastic_gradient_descent(
        logistic_cost_function, data['x_train'], data['y_train'], w_0, EPOCHS,
        eta, MINIBATCH_SIZE)

    print('Final objective function value: {:.4f}'.format(f_values2[-1][0]))
    print('\n--- Press any key to continue ---')
    plot_f_values(f_values1, f_values2)

    print(
        '\n----------------------- Model selection -------------------------------'
    )
    print('--Learning algorithm: SGD--')
    print('--Training criterion: regularized_logistic_cost_function--')
    print('--Learning step: {}--'.format(eta))
    print('--Number of epochs: {}--'.format(EPOCHS))
    print('--Mini-batch size: {}--'.format(MINIBATCH_SIZE))

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    l, t, w_computed, F = model_selection(data['x_train'], data['y_train'],
                                          data['x_val'], data['y_val'], w_0,
                                          EPOCHS, eta, MINIBATCH_SIZE, lambdas,
                                          thetas)

    print('Best regularization parameter lambda: {}'.format(l))
    print('Best classification threshold theta: {:.4f}'.format(t))
    print('Best F-measure value: {:.4f}'.format(np.max(F)))
    print('\n--- Press any key to continue ---')
    plot_theta_lambda(F, thetas, lambdas)

    print(
        '\n------------------------ FACE DETECTION -------------------------------\n'
    )
    animate_face_detect(w_computed, t)
Example #8
def run_training():
    data = load_data()

    print(
        '---------- Training logistic regression with gradient descent --------'
    )
    print(
        '------------------ May take up to 1 min. -----------------------------'
    )

    eta = 0.1
    theta = 0.65
    lambdas = [0, 0.00001, 0.0001, 0.001, 0.01, 0.1]
    thetas = list(np.arange(0.1, 0.9, 0.05))

    log_cost_for_data = functools.partial(logistic_cost_function,
                                          x_train=data['x_train'],
                                          y_train=data['y_train'])
    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed1, f_values1 = gradient_descent(log_cost_for_data, w_0, EPOCHS,
                                              eta)

    print('Final value of optimization objective: {:.4f}'.format(
        f_values1[-1][0]))

    print(
        '\n-------  Training logistic regression with stochastic gradient descent  -----'
    )
    print(
        '------------------ May take up to 1 min. -----------------------------'
    )

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed2, f_values2 = stochastic_gradient_descent(
        logistic_cost_function, data['x_train'], data['y_train'], w_0, EPOCHS,
        eta, MINIBATCH_SIZE)

    print('Final value of optimization objective: {:.4f}'.format(
        f_values2[-1][0]))
    print('\n--- Press any key to continue ---')
    plot_f_values(f_values1, f_values2)

    print(
        '\n----------------------- Model selection -------------------------------'
    )
    print('--Optimization method: SGD--')
    print('--Training criterion: regularized_logistic_cost_function--')
    print('--Step: {}--'.format(eta))
    print('--Number of epochs: {}--'.format(EPOCHS))
    print('--Mini-batch size: {}--'.format(MINIBATCH_SIZE))

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    l, t, w_computed, F = model_selection(data['x_train'], data['y_train'],
                                          data['x_val'], data['y_val'], w_0,
                                          EPOCHS, eta, MINIBATCH_SIZE, lambdas,
                                          thetas)

    print('The best regularization coefficient: {}'.format(l))
    print('The best threshold value (theta): {:.4f}'.format(t))
    print('The best value of F-measure: {:.4f}'.format(np.max(F)))
    print('\n--- Press any key to continue ---')
    plot_theta_lambda(F, thetas, lambdas)

    print(
        '\n------------------------ FACE DETECTION -------------------------------\n'
    )
    animate_face_detect(w_computed, t)
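Both run_training variants read the module-level constants EPOCHS and MINIBATCH_SIZE and are presumably launched from a main guard; a minimal entry point under that assumption (the constant values below are placeholders, not taken from the original sources):

EPOCHS = 100          # placeholder value; the original constant is not shown
MINIBATCH_SIZE = 50   # placeholder value; the original constant is not shown

if __name__ == '__main__':
    run_training()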