Example #1
def ls_solve_test():

    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    b = np.array([[1], [2], [3], [4]])
    w, E = reg.ls_solve(A, b)
    print(w)
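Example #2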
def ls_solve_test():

    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    b = np.array([[1], [2], [3], [4]])
    w, E = reg.ls_solve(A, b)

    return w, E
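Every test on this page calls reg.ls_solve(A, b), which the page itself does not show. As a point of reference, here is a minimal sketch of such a solver, assuming it solves the normal equations and returns the weights together with the sum of squared residuals (the (w, E) pair the tests unpack):

import numpy as np

def ls_solve(A, b):
    # Solve the normal equations (A^T A) w = A^T b; np.linalg.solve is
    # preferred over forming an explicit inverse for numerical stability.
    w = np.linalg.solve(A.T.dot(A), A.T.dot(b))
    # E is the sum of squared residuals ||A w - b||^2.
    E = float(np.sum((A.dot(w) - b) ** 2))
    return w, E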
Example #3
def ls_solve_test():
    #------------------------------------------------------------------#
    # TODO: Test your implementation of the ls_solve definition
    #------------------------------------------------------------------#
    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    B = np.array([[1], [2], [3], [4]])
    print(reg.ls_solve(A, B))
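Example #4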
def ls_solve_test():
    #------------------------------------------------------------------#
    # TODO: Test your implementation of the ls_solve definition

    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    b = np.array([[1], [2], [3], [4]])
    w, _ = reg.ls_solve(A, b)
    return w
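Example #5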
def ls_solve_test():

    # Define the data matrix A and the target vector b
    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    b = np.array([[1], [2], [3], [4]])

    w, E = reg.ls_solve(A, b)
    return w
Example #6
def ls_solve_test():
    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    B = np.array([[1], [2], [3], [4]])
    #------------------------------------------------------------------#
    # Test your implementation of the ls_solve definition
    #------------------------------------------------------------------#

    return reg.ls_solve(A, B)
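Example #7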
def ls_solve_test():
    #------------------------------------------------------------------#
    # TODO: Test your implementation of the ls_solve definition
    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    b = np.array([1, 2, 3, 4])
    w, E = reg.ls_solve(A, b)
    print(w)
    print(E)
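Example #8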
def ls_solve_test():
    #------------------------------------------------------------------#
    # TODO: Test your implementation of the ls_solve definition
    #------------------------------------------------------------------#
    A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
    c = np.array([1, 2, 3, 4])
    b = c.reshape(-1, 1)
    # ls_solve returns the pair (w, E); unpack it so w holds only the weights.
    w, _ = reg.ls_solve(A, b)
    return w
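Because all of the tests above use the same 4x2 system, NumPy's built-in least-squares solver offers a quick cross-check that is independent of the reg module. A minimal sketch, assuming only NumPy is available:

import numpy as np

A = np.array([[3, 4], [5, 6], [7, 8], [17, 10]])
b = np.array([[1], [2], [3], [4]])

# np.linalg.lstsq minimizes ||A w - b||^2 directly; rcond=None selects the
# current default cutoff for small singular values.
w, residuals, rank, sv = np.linalg.lstsq(A, b, rcond=None)
print(w)  # should agree with the w returned by reg.ls_solve(A, b)

Example #9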
def linear_regression(train_data, test_data, batch_size):
    # plot the training dataset
#    fig = plt.figure(figsize=(10,10))
#    ax = fig.add_subplot(111)
#    ax.plot(train_data[:,0], train_data[:,1], '*')
#    ax.grid()
#    ax.set_xlabel('x')
#    ax.set_ylabel('y')
#    ax.set_title('Training data')

    #---------------------------------------------------------------------#
    # TODO: Implement training of a linear regression model.
    # Here you should reuse ls_solve() from the registration mini-project.
    # The provided addones() function adds a column of all ones to a data
    # matrix X in a similar way to the c2h() function used in registration.

    trainX = train_data[:,0].reshape(-1,1)
    trainXones = util.addones(trainX)
    trainY = train_data[:,1].reshape(-1,1)
 
    Theta, _ = reg.ls_solve(trainXones, trainY) 
    print(Theta)
    #---------------------------------------------------------------------#

    fig1 = plt.figure(figsize=(10,10))
    ax1 = fig1.add_subplot(111)
    util.plot_regression_no_bars(trainX, trainY, Theta, ax1)
    ax1.grid()
    ax1.set_xlabel('x')
    ax1.set_ylabel('y')
    ax1.legend(('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax1.set_title('Training set')
    
    fig1.savefig("Regression train with batch size {}.png".format(batch_size)) 


    testX = test_data[:,0].reshape(-1,1)
    testY = test_data[:,1].reshape(-1,1)

    fig2 = plt.figure(figsize=(10,10))
    ax2 = fig2.add_subplot(111)
    util.plot_regression_no_bars(testX, testY, Theta, ax2)
    ax2.grid()
    ax2.set_xlabel('x')
    ax2.set_ylabel('y')
    ax2.legend(('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax2.set_title('Test set')
    
    fig2.savefig("Regression test with batch size {}.png".format(batch_size)) 

    #---------------------------------------------------------------------#
    # TODO: Compute the error for the trained model.
    predictedY_test = util.addones(testX).dot(Theta)
    E_test = np.sum(np.square(np.subtract(predictedY_test, testY)))
    #---------------------------------------------------------------------#

    return E_test, predictedY_test
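The comments in these examples describe util.addones() as adding a column of all ones to a data matrix X, analogous to the c2h() function from registration. The function itself is provided by the course utilities and not shown here; a minimal sketch of the presumed behavior (the placement of the ones column is an assumption):

import numpy as np

def addones(X):
    # Append a column of ones to X so the corresponding entry of Theta
    # acts as the intercept of the linear model.
    ones = np.ones((X.shape[0], 1))
    return np.concatenate((X, ones), axis=1)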
Example #10
def quadratic_regression():
    # load the training, validation and testing datasets
    fn1 = '../data/linreg_ex_test.txt'
    fn2 = '../data/linreg_ex_train.txt'
    fn3 = '../data/linreg_ex_validation.txt'
    # shape (30,2) numpy array; x = column 0, y = column 1
    test_data = np.loadtxt(fn1)
    # shape (20,2) numpy array; x = column 0, y = column 1
    train_data = np.loadtxt(fn2)
    # shape (10,2) numpy array; x = column 0, y = column 1
    validation_data = np.loadtxt(fn3)

    # plot the training dataset
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    ax.plot(train_data[:, 0], train_data[:, 1], '*')
    ax.grid()
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Training data')

    # ---------------------------------------------------------------------#
    # TODO: Implement training of a linear regression model.
    # Here you should reuse ls_solve() from the registration mini-project.
    # The provided addones() function adds a column of all ones to a data
    # matrix X in a similar way to the c2h() function used in registration.

    trainX = train_data[:, 0].reshape(-1, 1)
    trainXones = util.addones(trainX)
    trainXfull = np.concatenate((np.square(trainX), trainXones), axis=1)
    trainY = train_data[:, 1].reshape(-1, 1)

    Theta, _ = reg.ls_solve(trainXfull, trainY)
    # Equivalent closed form: Theta = np.linalg.inv(trainXfull.T.dot(trainXfull)).dot(trainXfull.T).dot(trainY)

    # ---------------------------------------------------------------------#

    fig1 = plt.figure(figsize=(10, 10))
    ax1 = fig1.add_subplot(111)
    util.plot_curve(trainX, Theta, ax1)
    ax1.grid()
    ax1.set_xlabel('x')
    ax1.set_ylabel('y')
    ax1.legend(
        ('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax1.set_title('Training set')

    testX = test_data[:, 0].reshape(-1, 1)
    testXones = util.addones(testX)
    testXfull = np.concatenate((np.square(testX), testXones), axis=1)
    testY = test_data[:, 1].reshape(-1, 1)

    fig2 = plt.figure(figsize=(10, 10))
    ax2 = fig2.add_subplot(111)
    util.plot_curve(testX, Theta, ax2)
    ax2.grid()
    ax2.set_xlabel('x')
    ax2.set_ylabel('y')
    ax2.legend(
        ('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax2.set_title('Test set')

    # ---------------------------------------------------------------------#
    # TODO: Compute the error for the trained model.
    # ---------------------------------------------------------------------#
    # Evaluate on the validation set (not the training set) for E_validation.
    validationX = validation_data[:, 0].reshape(-1, 1)
    validationXfull = np.concatenate((np.square(validationX), util.addones(validationX)), axis=1)
    validationY = validation_data[:, 1].reshape(-1, 1)
    E_validation = np.linalg.norm(validationXfull.dot(Theta) - validationY)
    E_test = np.linalg.norm(testXfull.dot(Theta) - testY)

    return E_validation, E_test
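Note that this example reports errors as residual 2-norms (np.linalg.norm gives ||X.dot(Theta) - y||), while the neighboring examples report the sum of squared errors; since E_sse = E_norm**2, both rank models the same way, but the raw numbers are not directly comparable.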
Example #11
def quadratic_regression():
    #---------------------------------------------------------------------#
    # load the training, validation and testing datasets
    fn1 = '../data/linreg_ex_test.txt'
    fn2 = '../data/linreg_ex_train.txt'
    fn3 = '../data/linreg_ex_validation.txt'
    # shape (30,2) numpy array; x = column 0, y = column 1
    test_data = np.loadtxt(fn1)
    # shape (20,2) numpy array; x = column 0, y = column 1
    train_data = np.loadtxt(fn2)
    # shape (10,2) numpy array; x = column 0, y = column 1
    validation_data = np.loadtxt(fn3)

    # plot the training dataset
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    ax.plot(train_data[:, 0], train_data[:, 1], '*')
    ax.grid()
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Training data')

    #---------------------------------------------------------------------#
    # TODO: Implement training of a linear regression model.
    # Here you should reuse ls_solve() from the registration mini-project.
    # The provided addones() function adds a column of all ones to a data
    # matrix X in a similar way to the c2h() function used in registration.

    trainX = train_data[:, 0].reshape(-1, 1)
    trainXsquared = np.square(train_data[:, 0]).reshape(-1, 1)
    trainX = np.hstack((trainX, trainXsquared))
    trainXones = util.addones(trainX)
    trainY = train_data[:, 1].reshape(-1, 1)

    validationX = validation_data[:, 0].reshape(-1, 1)
    validationXsquared = np.square(validation_data[:, 0]).reshape(-1, 1)
    validationX = np.hstack((validationX, validationXsquared))
    validationones = util.addones(validationX)
    validationY = validation_data[:, 1].reshape(-1, 1)

    Theta, _ = reg.ls_solve(trainXones, trainY)
    print(Theta)
    #---------------------------------------------------------------------#

    fig1 = plt.figure(figsize=(10, 10))
    ax1 = fig1.add_subplot(111)
    util.plot_regression(trainX, trainY, Theta, ax1)
    ax1.grid()
    ax1.set_xlabel('x')
    ax1.set_ylabel('y')
    ax1.legend(
        ('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax1.set_title('Training set')

    testX = test_data[:, 0].reshape(-1, 1)
    testXsquared = np.square(testX[:, 0]).reshape(-1, 1)
    testX = np.hstack((testX, testXsquared))

    testY = test_data[:, 1].reshape(-1, 1)

    fig2 = plt.figure(figsize=(10, 10))
    ax2 = fig2.add_subplot(111)
    util.plot_regression(testX, testY, Theta, ax2)
    ax2.grid()
    ax2.set_xlabel('x')
    ax2.set_ylabel('y')
    ax2.legend(
        ('Original data', 'Regression curve', 'Predicted Data', 'Error'))
    ax2.set_title('Test set')

    # TODO: Compute the error for the trained model.
    predictedY = validationones.dot(Theta)
    predictedY_test = util.addones(testX).dot(Theta)
    E_validation = np.sum(np.square(np.subtract(predictedY, validationY)))
    E_test = np.sum(np.square(np.subtract(predictedY_test, testY)))
    #---------------------------------------------------------------------#

    return E_validation, E_test
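Note on coefficient ordering: Examples #11 and #12 build the features as [x, x^2] and then append the ones column, so Theta comes out as (linear, quadratic, intercept), whereas Example #10 concatenates the squared column first, giving (quadratic, linear, intercept). Any downstream code that indexes into Theta has to match the ordering used at training time.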
Example #12
# The provided addones() function adds a column of all ones to a data
# matrix X in a similar way to the c2h() function used in registration.

trainX = train_data[:, 0].reshape(-1, 1)
trainXsquared = np.square(train_data[:, 0]).reshape(-1, 1)
trainX = np.hstack((trainX, trainXsquared))
trainXones = util.addones(trainX)
trainY = train_data[:, 1].reshape(-1, 1)

validationX = validation_data[:, 0].reshape(-1, 1)
validationXsquared = np.square(validation_data[:, 0]).reshape(-1, 1)
validationX = np.hstack((validationX, validationXsquared))
validationones = util.addones(validationX)
validationY = validation_data[:, 1].reshape(-1, 1)

Theta, _ = reg.ls_solve(trainXones, trainY)
print(Theta)
#---------------------------------------------------------------------#

fig1 = plt.figure(figsize=(10, 10))
ax1 = fig1.add_subplot(111)
util.plot_regression(trainX, trainY, Theta, ax1)
ax1.grid()
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.legend(('Original data', 'Regression curve', 'Predicted Data', 'Error'))
ax1.set_title('Training set')

testX = test_data[:, 0].reshape(-1, 1)
testXsquared = np.square(testX[:, 0]).reshape(-1, 1)
testX = np.hstack((testX, testXsquared))