Example 1
import json

import numpy as np
import matplotlib.pyplot as plt

import logreg_toolbox
import logistic_regression as lr  # Assumed module name for the lr.cost / lr.grad helpers
import gradient_descent as gd     # Assumed module name for the gd.gradient_descent helper


def main():
    # Set parameters
    degree = 20
    eta = 2
    max_iter = 200

    # Load the data set
    with open('data_logreg.json', 'r') as data_file:
        data = json.load(data_file)
    for k, v in data.items():
        data[k] = np.array(v)  # Convert each list to a NumPy array

    # Expand with polynomial features
    X_train = logreg_toolbox.poly_2D_design_matrix(data['x1_train'],
                                                   data['x2_train'], degree)
    n = X_train.shape[1]

    # Define the cost and its gradient as functions of the parameter vector theta
    def f(theta):
        return lr.cost(theta, X_train, data['y_train'])

    def df(theta):
        return lr.grad(theta, X_train, data['y_train'])

    # Check that the gradient computation is consistent with the cost function
    logreg_toolbox.check_gradient(f, df, n)

    # Initial point for gradient descent
    theta0 = np.zeros(n)

    theta_opt, E_list = gd.gradient_descent(f, df, theta0, eta, max_iter)

    logreg_toolbox.plot_logreg(data, degree, theta_opt, E_list)
    plt.show()


if __name__ == '__main__':
    main()
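
The helper modules aliased as lr and gd are not part of this listing. As a point of reference, the sketch below shows one plausible implementation, assuming lr.cost is the mean cross-entropy of a sigmoid model and gd.gradient_descent performs fixed-step updates while recording the cost at every iteration; the actual toolbox code may differ.

import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def cost(theta, X, y):
    """Mean cross-entropy of the logistic model p = sigmoid(X @ theta)."""
    p = sigmoid(X @ theta)
    eps = 1e-12  # Guard against log(0)
    return -np.mean(y * np.log(p + eps) + (1 - y) * np.log(1 - p + eps))


def grad(theta, X, y):
    """Gradient of the mean cross-entropy with respect to theta."""
    p = sigmoid(X @ theta)
    return X.T @ (p - y) / X.shape[0]


def gradient_descent(f, df, theta0, eta, max_iter):
    """Fixed-step gradient descent; returns the final theta and the cost history."""
    theta = np.array(theta0, dtype=float)
    E_list = [f(theta)]
    for _ in range(max_iter):
        theta = theta - eta * df(theta)
        E_list.append(f(theta))
    return theta, E_list
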
Example 2
import json

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

import logreg_toolbox
import logistic_regression as lr  # Assumed module name for the lr.cost / lr.grad helpers
import gradient_descent as gd     # Assumed module name for the gd.* descent routines


def main():
    # Set parameters
    degree = 5
    eta = 1.
    max_iter = 20

    # Load the data set
    with open('data_logreg.json', 'r') as data_file:
        data = json.load(data_file)
    for k, v in data.items():
        data[k] = np.array(v)  # Convert each list to a NumPy array

    # Expand with polynomial features
    X_train = logreg_toolbox.poly_2D_design_matrix(data['x1_train'],
                                                   data['x2_train'], degree)
    n = X_train.shape[1]

    # Define the cost and its gradient as functions of the parameter vector theta
    def f(theta):
        return lr.cost(theta, X_train, data['y_train'])

    def df(theta):
        return lr.grad(theta, X_train, data['y_train'])

    # Check that the gradient computation is consistent with the cost function
    logreg_toolbox.check_gradient(f, df, n)

    # Initial point for gradient descent
    theta0 = np.zeros(n)

    #### VARIANT 1: Optimize with gradient descent
    # theta_opt, E_list = gd.gradient_descent(f, df, theta0, eta, max_iter)

    #### VARIANT 2: Optimize with adaptive-learning-rate gradient descent
    # theta_opt, E_list, lr_list = gd.adaptative_gradient_descent(f, df, theta0, eta, max_iter)
    # plt.plot(lr_list)
    # plt.xlabel('Iterations')
    # plt.ylabel('Learning rate')
    # print('Adaptive gradient, final learning rate: {:.3g}'.format(lr_list[-1]))

    #### VARIANT 3: Optimize with scipy.optimize.minimize
    res = minimize(f,
                   x0=theta0,
                   jac=df,
                   options={
                       'disp': True,
                       'maxiter': max_iter
                   })
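    # Passing jac=df supplies the analytical gradient, so minimize does not
    # have to approximate it with finite differences.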
    theta_opt = res.x.reshape((n, 1))
    E_list = []  # No per-iteration cost history is collected with this variant

    logreg_toolbox.plot_logreg(data, degree, theta_opt, E_list)
    plt.show()


if __name__ == '__main__':
    main()
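
The adaptive routine used in VARIANT 2 is only referenced by name. The sketch below shows one common way to obtain an adaptive learning rate with the same (theta_opt, E_list, lr_list) return signature: a "bold driver" rule that grows the step after an iteration that reduces the cost and shrinks it otherwise. This is an assumption for illustration, not the actual gd.adaptative_gradient_descent implementation.

import numpy as np


def adaptive_gradient_descent(f, df, theta0, eta0, max_iter, grow=1.1, shrink=0.5):
    """Gradient descent with a 'bold driver' step-size rule (illustrative sketch).

    Returns the final parameters, the cost per iteration and the learning rate
    per iteration, mirroring the interface used in VARIANT 2.
    """
    theta = np.array(theta0, dtype=float)
    eta = eta0
    E_list = [f(theta)]
    lr_list = [eta]
    for _ in range(max_iter):
        candidate = theta - eta * df(theta)
        if f(candidate) < E_list[-1]:
            # Step improved the cost: accept it and cautiously increase the rate
            theta = candidate
            eta *= grow
        else:
            # Step made things worse: reject it and shrink the rate
            eta *= shrink
        E_list.append(f(theta))
        lr_list.append(eta)
    return theta, E_list, lr_list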