Example #1
    def test_stochastic_gradient_descent_func_values(self):
        x_train = TEST_DATA['sopt']['x_train']
        y_train = TEST_DATA['sopt']['y_train']
        w0 = np.copy(TEST_DATA['sopt']['w0'])
        eta = TEST_DATA['sopt']['step']
        epochs = TEST_DATA['sopt']['epochs']
        func_values = TEST_DATA['sopt']['func_values']
        obj_fun = TEST_DATA['sopt']['obj_fun']
        mini_batch = TEST_DATA['sopt']['mini_batch']
        _, func_values_computed = stochastic_gradient_descent(
            obj_fun, x_train, y_train, w0, epochs, eta, mini_batch)
        max_diff = np.max(np.abs(func_values - func_values_computed))
        self.assertAlmostEqual(max_diff, 0, places=6)
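
None of the examples on this page show stochastic_gradient_descent itself, only calls to it. Below is a minimal sketch of an implementation compatible with the tested interface, i.e. the (obj_fun, x_train, y_train, w0, epochs, eta, mini_batch) arguments and the (w, func_values) return pair. The sequential mini-batch split and recording the full-batch objective once per epoch are assumptions, not taken from the tested code.

import numpy as np

def stochastic_gradient_descent(obj_fun, x_train, y_train, w0,
                                epochs, eta, mini_batch):
    """Sketch: plain mini-batch SGD.

    obj_fun(w, x, y) is assumed to return (value, gradient).
    func_values records the objective on the full training set
    once per epoch, as an (epochs, 1) array.
    """
    w = np.copy(w0)
    n = x_train.shape[0]
    func_values = []
    for _ in range(epochs):
        # sequential mini-batch split (shuffling is an option not shown here)
        for start in range(0, n, mini_batch):
            x_mb = x_train[start:start + mini_batch]
            y_mb = y_train[start:start + mini_batch]
            _, grad = obj_fun(w, x_mb, y_mb)
            w = w - eta * grad
        val, _ = obj_fun(w, x_train, y_train)
        func_values.append(val)
    return w, np.reshape(np.array(func_values), (epochs, 1))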
Example #2
    def test_stochastic_gradient_descent_w(self):
        x_train = TEST_DATA['sopt']['x_train']
        y_train = TEST_DATA['sopt']['y_train']
        w0 = np.copy(TEST_DATA['sopt']['w0'])
        epochs = TEST_DATA['sopt']['epochs']
        eta = TEST_DATA['sopt']['step']
        mini_batch = TEST_DATA['sopt']['mini_batch']
        w_expected = TEST_DATA['sopt']['w']

        w, _ = stochastic_gradient_descent(logistic_cost_function, x_train, y_train, w0, epochs,
                                           eta, mini_batch)

        self.assertEqual(np.shape(w), (2, 1))
        np.testing.assert_almost_equal(w, w_expected)
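
logistic_cost_function is imported by these tests but not shown on this page. A plausible sketch follows, assuming the (w, x_train, y_train) -> (value, gradient) contract implied by how the later examples bind the data with functools.partial; the exact objective (mean negative log-likelihood) is an assumption.

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def logistic_cost_function(w, x_train, y_train):
    """Sketch: mean negative log-likelihood of logistic regression
    and its gradient with respect to w.

    x_train: (N, M) design matrix, y_train: (N, 1) labels in {0, 1},
    w: (M, 1) weights. Returns (value, gradient).
    """
    n = x_train.shape[0]
    p = sigmoid(x_train @ w)                      # (N, 1) predicted P(y=1|x)
    val = -np.mean(y_train * np.log(p) + (1 - y_train) * np.log(1 - p))
    grad = x_train.T @ (p - y_train) / n          # (M, 1)
    return val, grad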
Example #3
    def test_stochastic_gradient_descent_func_values(self):
        x_train = TEST_DATA['sopt']['x_train']
        y_train = TEST_DATA['sopt']['y_train']
        w0 = np.copy(TEST_DATA['sopt']['w0'])
        epochs = TEST_DATA['sopt']['epochs']
        eta = TEST_DATA['sopt']['step']
        mini_batch = TEST_DATA['sopt']['mini_batch']
        func_values_expected = TEST_DATA['sopt']['func_values']

        _, func_values = stochastic_gradient_descent(logistic_cost_function,
                                                     x_train, y_train, w0,
                                                     epochs, eta, mini_batch)

        self.assertEqual(np.shape(func_values), (100, 1))
        np.testing.assert_almost_equal(func_values, func_values_expected)
Example #4
def run_training():
    data = load_data()

    print(
        '---------- Training logistic regression with gradient descent --------'
    )
    print(
        '------------------ This may take about 1 min. ------------------------'
    )

    eta = 0.1
    theta = 0.65
    lambdas = [0, 0.00001, 0.0001, 0.001, 0.01, 0.1]
    thetas = list(np.arange(0.1, 0.9, 0.05))

    log_cost_for_data = functools.partial(logistic_cost_function,
                                          x_train=data['x_train'],
                                          y_train=data['y_train'])
    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed1, f_values1 = gradient_descent(log_cost_for_data, w_0, EPOCHS,
                                              eta)

    print('Final objective value: {:.4f}'.format(f_values1[-1][0]))

    print(
        '\n------- Training logistic regression with stochastic gradient descent -----'
    )
    print(
        '------------------ This may take about 1 min. ------------------------'
    )

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed2, f_values2 = stochastic_gradient_descent(
        logistic_cost_function, data['x_train'], data['y_train'], w_0, EPOCHS,
        eta, MINIBATCH_SIZE)

    print('Final objective value: {:.4f}'.format(f_values2[-1][0]))
    print('\n--- Press any key to continue ---')
    plot_f_values(f_values1, f_values2)

    print(
        '\n----------------------- Model selection ------------------------------'
    )
    print('--Learning algorithm: SGD--')
    print('--Training criterion: regularized_logistic_cost_function--')
    print('--Learning step: {}--'.format(eta))
    print('--Number of epochs: {}--'.format(EPOCHS))
    print('--Mini-batch size: {}--'.format(MINIBATCH_SIZE))

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    l, t, w_computed, F = model_selection(data['x_train'], data['y_train'],
                                          data['x_val'], data['y_val'], w_0,
                                          EPOCHS, eta, MINIBATCH_SIZE, lambdas,
                                          thetas)

    print('Best regularization parameter lambda: {}'.format(l))
    print('Best classification threshold theta: {:.4f}'.format(t))
    print('Best value of the F-measure: {:.4f}'.format(np.max(F)))
    print('\n--- Press any key to continue ---')
    plot_theta_lambda(F, thetas, lambdas)

    print(
        '\n------------------------ FACE DETECTION ------------------------------\n'
    )
    animate_face_detect(w_computed, t)
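
model_selection is the one routine in this example whose behavior is only visible through its outputs (best lambda, best theta, the trained weights, and the F matrix passed to plot_theta_lambda). The sketch below grid-searches lambdas x thetas, scoring each combination by F-measure on the validation set. It builds on the sigmoid, logistic_cost_function, and stochastic_gradient_descent sketches given earlier on this page; the signature of regularized_logistic_cost_function and the inline f_measure helper are assumptions, not taken from the original code.

import functools
import numpy as np

def regularized_logistic_cost_function(w, x_train, y_train,
                                       regularization_lambda):
    # Hypothetical signature: L2-regularized variant of the cost sketch
    # above; excluding the bias weight w[0] from the penalty is an assumption.
    val, grad = logistic_cost_function(w, x_train, y_train)
    w_pen = np.copy(w)
    w_pen[0] = 0
    val = val + regularization_lambda / 2 * np.sum(w_pen ** 2)
    grad = grad + regularization_lambda * w_pen
    return val, grad

def f_measure(y_true, y_pred):
    # F1 score: harmonic mean of precision and recall.
    y_pred = y_pred.astype(float)
    tp = np.sum(y_pred * y_true)
    fp = np.sum(y_pred * (1 - y_true))
    fn = np.sum((1 - y_pred) * y_true)
    return 2 * tp / (2 * tp + fp + fn)

def model_selection(x_train, y_train, x_val, y_val, w0,
                    epochs, eta, mini_batch, lambdas, thetas):
    """Sketch: grid search over regularization strengths and
    classification thresholds, scored on the validation set."""
    F = np.zeros((len(lambdas), len(thetas)))
    best_lambda, best_theta, best_w, best_f = None, None, None, -np.inf
    for i, lam in enumerate(lambdas):
        # train one model per regularization strength
        cost = functools.partial(regularized_logistic_cost_function,
                                 regularization_lambda=lam)
        w, _ = stochastic_gradient_descent(cost, x_train, y_train,
                                           np.copy(w0), epochs, eta,
                                           mini_batch)
        scores = sigmoid(x_val @ w)  # validation-set probabilities
        for j, theta in enumerate(thetas):
            F[i, j] = f_measure(y_val, scores >= theta)
            if F[i, j] > best_f:
                best_lambda, best_theta, best_w, best_f = (lam, theta, w,
                                                           F[i, j])
    return best_lambda, best_theta, best_w, F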
Example #5
def run_training():
    data = load_data()

    print(
        '---------- Training logistic regression with gradient descent --------'
    )
    print(
        '------------------ May take up to 1 min. -----------------------------'
    )

    eta = 0.1
    theta = 0.65
    lambdas = [0, 0.00001, 0.0001, 0.001, 0.01, 0.1]
    thetas = list(np.arange(0.1, 0.9, 0.05))

    log_cost_for_data = functools.partial(logistic_cost_function,
                                          x_train=data['x_train'],
                                          y_train=data['y_train'])
    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed1, f_values1 = gradient_descent(log_cost_for_data, w_0, EPOCHS,
                                              eta)

    print('Final value of optimization objective: {:.4f}'.format(
        f_values1[-1][0]))

    print(
        '\n-------  Training logistic regression with stochastic gradient descent  -----'
    )
    print(
        '------------------ May take up to 1 min. -----------------------------'
    )

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    w_computed2, f_values2 = stochastic_gradient_descent(
        logistic_cost_function, data['x_train'], data['y_train'], w_0, EPOCHS,
        eta, MINIBATCH_SIZE)

    print('Final value of optimization objective: {:.4f}'.format(
        f_values2[-1][0]))
    print('\n--- Press any key to continue ---')
    plot_f_values(f_values1, f_values2)

    print(
        '\n----------------------- Model selection -------------------------------'
    )
    print('--Optimization method: SGD--')
    print('--Training criterion: regularized_logistic_cost_function--')
    print('--Step: {}--'.format(eta))
    print('--Number of epochs: {}--'.format(EPOCHS))
    print('--Mini-batch size: {}--'.format(MINIBATCH_SIZE))

    w_0 = np.zeros([data['x_train'].shape[1], 1])
    l, t, w_computed, F = model_selection(data['x_train'], data['y_train'],
                                          data['x_val'], data['y_val'], w_0,
                                          EPOCHS, eta, MINIBATCH_SIZE, lambdas,
                                          thetas)

    print('The best regularization coefficient: {}'.format(l))
    print('The best threshold value (theta): {:.4f}'.format(t))
    print('The best value of F-measure: {:.4f}'.format(np.max(F)))
    print('\n--- Press any key to continue ---')
    plot_theta_lambda(F, thetas, lambdas)

    print(
        '\n------------------------ FACE DETECTION -------------------------------\n'
    )
    animate_face_detect(w_computed, t)
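
For comparison, gradient_descent is called in these examples with an objective already bound to the training data via functools.partial, so it only receives the weights. A minimal sketch under that assumption:

import numpy as np

def gradient_descent(obj_fun, w0, epochs, eta):
    """Sketch: full-batch gradient descent.

    obj_fun(w) is assumed to return (value, gradient) -- matching the
    functools.partial call above, which binds x_train/y_train in
    advance. func_values holds one objective value per epoch, as an
    (epochs, 1) array.
    """
    w = np.copy(w0)
    func_values = []
    for _ in range(epochs):
        val, grad = obj_fun(w)
        w = w - eta * grad
        func_values.append(val)
    return w, np.reshape(np.array(func_values), (epochs, 1))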