Example #1
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import pinv

from common_functions import load_data, J_liner_regression, add_zero_feature, gradient_descent, matrix_args, feature_normalize
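The common_functions module is not shown in this listing. For reference, a rough sketch of the helpers used here, with signatures inferred from the calls below, might look like the following; the bodies are assumptions, not the author's implementations.

def feature_normalize(X):
    # Zero-mean, unit-variance scaling; the statistics are returned for reuse
    mu = X.mean(axis=0)
    sigma = X.std(axis=0)
    return mu, sigma, (X - mu) / sigma

def add_zero_feature(X):
    # Prepend the intercept column of ones
    return np.hstack((np.ones((X.shape[0], 1)), X))

def J_liner_regression(theta, X, y):
    # Mean squared error cost (the argument order here is an assumption)
    return np.sum((X.dot(theta) - y) ** 2) / (2 * y.shape[0])

def gradient_descent(J, X, y, iterations, theta, alpha):
    # Batch gradient descent; records the cost after every update
    m = y.shape[0]
    J_history = []
    for _ in range(iterations):
        theta = theta - (alpha / m) * X.T.dot(X.dot(theta) - y)
        J_history.append(J(theta, X, y))
    return theta, J_history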

if __name__ == '__main__':
    X, y = load_data('ex1data2.txt')

    # Normalize the features, then prepend the intercept column of ones
    mu, sigma, X = feature_normalize(X)
    X = add_zero_feature(X)

    iterations = 400
    alphas = [0.01, 0.1]
    f, axarr = plt.subplots(len(alphas), sharex=True)
    plt.xlabel('Number of Iterations')
    plt.ylabel('Cost J')
    # Run gradient descent with each learning rate and plot its cost history
    for i, alpha in enumerate(alphas):
        theta = np.zeros((X.shape[1], 1))
        theta, J_history = gradient_descent(J_liner_regression, X, y, iterations, theta, alpha)

        axarr[i].set_title('Alpha = {}'.format(alpha))
        axarr[i].plot(range(len(J_history)), J_history)

    plt.show()
    # Estimate the price of a 1650 sq-ft, 3 br house.
    # Recall that the first column of X is all ones, so it does not need
    # to be normalized.

    x = np.ones((1, X.shape[1]))
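    # A sketch of one way to finish the estimate (an assumption, not part of the
    # original listing): normalize the raw house features with the mu/sigma
    # computed above and apply the theta learned by gradient descent.
    x[0, 1:] = ((np.array([1650.0, 3.0]) - mu) / sigma).ravel()
    price = x.dot(theta)[0, 0]
    print('Predicted price of a 1650 sq-ft, 3 br house: {:.2f}'.format(price))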
Example #2
        ax = fig.add_subplot(10, 10, i + 1, xticks=[], yticks=[])
        ax.imshow(x.reshape(32, 32).T, cmap=plt.cm.Greys_r, interpolation='nearest')

    plt.show()


if __name__ == '__main__':

    data = sio.loadmat('ex7data1.mat')
    X = data['X']
    x1, x2 = X.T

    plt.plot(x1, x2, 'bo')
    plt.show()

    mu, sigma, X_norm = feature_normalize(X)

    U, S = pca(X_norm)

    print('Top eigenvector:')
    print(' U[:,0] = %s' % U[:, 0])
    print('(you should expect to see -0.707107 -0.707107)')

    K = 1
    Z = project_data(X_norm, U, K)
    print('Projection of the first example: %s' % Z[0])
    print('(this value should be about 1.49631261)')

    X_rec = recover_data(Z, U, K)
    print('Approximation of the first example: %s' % X_rec[0])
    print('(this value should be about -1.05805279 -1.05805279)')
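The helpers pca, project_data, and recover_data are defined elsewhere in the original file. A minimal sketch of what they might look like for this exercise is given below; the names and signatures match the calls above, but the bodies are assumptions, not the author's code.

import numpy as np

def pca(X):
    # Covariance matrix of the (already normalized) data, then its eigenvectors via SVD
    Sigma = X.T.dot(X) / X.shape[0]
    U, S, _ = np.linalg.svd(Sigma)
    return U, S

def project_data(X, U, K):
    # Project every example onto the top K eigenvectors
    return X.dot(U[:, :K])

def recover_data(Z, U, K):
    # Map the projections back into the original feature space
    return Z.dot(U[:, :K].T)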
Example #3
    lambda_coef = 0
    error_train, error_val = learning_curve(X_extended, y, add_zero_feature(Xval), yval, lambda_coef)

    plt.plot(range(1, m + 1), error_train, label="Train")
    plt.plot(range(1, m + 1), error_val, c="r", label="Cross validation")

    plt.legend()
    plt.title("Learning curve for linear regression (pow of polynomial = 1)")
    plt.xlabel("Number of training examples)")
    plt.ylabel("Error")
    plt.axis([0, 13, 0, 150])
    plt.show()

    p = 8
    mu, sigma, X_poly = feature_normalize(poly_features(X, p))
    X_poly = add_zero_feature(X_poly)

    # Normalize any new inputs with the training-set statistics (mu, sigma)
    prepare_X = lambda X: add_zero_feature((poly_features(X, p) - mu) / sigma)

    X_poly_test = prepare_X(Xtest)
    X_poly_val = prepare_X(Xval)

    lambda_coef = 1
    theta = train_linear_regression(X_poly, y, lambda_coef)

    x = np.arange(np.min(X) - 15, np.max(X) + 25, 0.05)[:, np.newaxis]
    x_poly = prepare_X(x)

    plt.plot(x, np.dot(x_poly, theta))
    plot_data()
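The functions poly_features, learning_curve, train_linear_regression and plot_data come from the truncated part of this file. Under the usual conventions of this exercise, the first two might look roughly like the sketch below; the bodies are assumptions, not the author's code (train_linear_regression is assumed to return a fitted theta, as it is used above).

import numpy as np

def poly_features(X, p):
    # Map a single-column X to [X, X**2, ..., X**p]
    return np.hstack([X ** (i + 1) for i in range(p)])

def learning_curve(X, y, Xval, yval, lambda_coef):
    # Train on the first i examples and record train/validation error for each i
    m = X.shape[0]
    error_train, error_val = np.zeros(m), np.zeros(m)
    for i in range(1, m + 1):
        theta = train_linear_regression(X[:i], y[:i], lambda_coef)
        error_train[i - 1] = np.mean((X[:i].dot(theta) - y[:i]) ** 2) / 2
        error_val[i - 1] = np.mean((Xval.dot(theta) - yval) ** 2) / 2
    return error_train, error_val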