Example #1
File: a5.py Project: vrash/ml-class
def plot_validation_curves(X, y, Xval, yval, ldas):
    j_trains = []
    j_cvs = []
    for lda in ldas:
        # Fit theta on the full training set at this regularization strength
        newth = train_linreg(X, y, lda)
        # Report both errors without the regularization term (lda=0):
        # regularization belongs in training, not in evaluation
        j_train = linreg.cost(newth, X, y, lda=0)
        j_cv = linreg.cost(newth, Xval, yval, lda=0)

        j_trains.append(j_train)
        j_cvs.append(j_cv)

    plt.plot(ldas, j_trains, '-b', label='Training')
    plt.plot(ldas, j_cvs, '-g', label='CV')
    plt.legend(loc='upper right')
    plt.xlabel('lambda')
    plt.ylabel('Error')
    plt.show()
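Neither train_linreg nor the linreg module is shown on this page. A minimal sketch of what train_linreg might look like, assuming linreg also exposes a matching gradient function (linreg.grad here is an assumption, not a documented API):

import numpy as np
from scipy.optimize import minimize
import linreg  # module assumed from the snippets above

def train_linreg(X, y, lda):
    # Minimize the regularized cost starting from zero weights;
    # supplying the gradient (jac) speeds up and stabilizes the search
    th0 = np.zeros(X.shape[1])
    res = minimize(linreg.cost, th0, args=(X, y, lda),
                   jac=linreg.grad, method='CG',
                   options={'maxiter': 200})
    return res.x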
Example #2
File: a5.py Project: vrash/ml-class
def _train(X_ts, y_ts, Xval, yval, lda):
    newth = train_linreg(X_ts, y_ts, lda)
    # Measuring j_train on the full training set would be incorrect: the
    # model was fit only on X_ts, the growing subset used to build it, so
    # the training error must be evaluated on that same subset.
    j_train = linreg.cost(newth, X_ts, y_ts, lda=0)
    # The CV error is likewise reported without the regularization term
    j_cv = linreg.cost(newth, Xval, yval, lda=0)
    return (j_train, j_cv)
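_train is evidently the per-subset worker for learning curves. A sketch of the driver loop that would call it over growing training-set slices (this loop is not part of the quoted snippet):

def learning_curves(X, y, Xval, yval, lda):
    # Train on the first i examples, then record both errors
    m = X.shape[0]
    j_trains, j_cvs = [], []
    for i in range(1, m + 1):
        j_train, j_cv = _train(X[:i], y[:i], Xval, yval, lda)
        j_trains.append(j_train)
        j_cvs.append(j_cv)
    return j_trains, j_cvs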
Example #3
File: a5.py Project: vrash/ml-class
ldas = np.arange(0, 10, 0.1)

# uses X_poly, y, X_poly_val, yval
def plot_validation_curves(X, y, Xval, yval, ldas):
    j_trains = []
    j_cvs = []
    for lda in ldas:
        newth = train_linreg(X, y, lda)
        # Both errors are reported without the regularization term (lda=0)
        j_train = linreg.cost(newth, X, y, lda=0)
        j_cv = linreg.cost(newth, Xval, yval, lda=0)

        j_trains.append(j_train)
        j_cvs.append(j_cv)

    plt.plot(ldas, j_trains, '-b', label='Training')
    plt.plot(ldas, j_cvs, '-g', label='CV')
    plt.legend(loc='upper right')
    plt.xlabel('lambda')
    plt.ylabel('Error')
    plt.show()

#plot_validation_curves(X_poly, y, X_poly_val, yval, ldas) # doesn't match assignment outline

## part 3.4
# the test error is reported unregularized as well
print('test error: {0}'.format(linreg.cost(newth, X_poly_test, ytest, lda=0)))

## part 3.5
#plot_learning_curves(X_poly, y, X_poly_test, ytest, lda, random_examples=True)
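All three snippets lean on linreg.cost, whose module is not shown. A sketch of the standard regularized squared-error cost that the calls above appear to expect, with the intercept th[0] left out of the penalty:

import numpy as np

def cost(th, X, y, lda=0):
    # J = 1/(2m) * sum((X @ th - y)^2) + lda/(2m) * sum(th[1:]^2)
    m = X.shape[0]
    resid = X @ th - y
    J = (resid @ resid) / (2 * m)
    return J + lda / (2 * m) * (th[1:] @ th[1:])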
Example #4
# Prepend a column of ones to X (intercept term)
ones = np.ones((2, m))
ones[1, :] = X
X = ones.T

# Initialize fitting parameters
theta = np.zeros(2)

# Some gradient descent settings
iterations = 1500
alpha = 0.01

print('Testing the cost function ...')
# compute and display initial cost
J = cost(X, y, theta)
print('With theta = [0 ; 0]\nCost computed = %f' % J)
print('Expected cost value (approx) 32.07')

# further testing of the cost function
J = cost(X, y, np.array([-1, 2]))
print()
print('With theta = [-1 ; 2]\nCost computed = %f\n' % J)
print('Expected cost value (approx) 54.24')

input('Program paused. Press enter to continue.')

# run gradient descent
theta = gradient_descent(X, y, theta, alpha, iterations)

# print theta to screen
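cost and gradient_descent are not defined in this excerpt. A minimal sketch consistent with the printed expectations, using batch gradient descent on the unregularized squared-error cost:

import numpy as np

def cost(X, y, theta):
    # J = 1/(2m) * sum((X @ theta - y)^2)
    m = y.shape[0]
    resid = X @ theta - y
    return (resid @ resid) / (2 * m)

def gradient_descent(X, y, theta, alpha, iterations):
    # Repeated full-batch updates: theta -= alpha/m * X^T (X theta - y)
    m = y.shape[0]
    for _ in range(iterations):
        theta = theta - (alpha / m) * (X.T @ (X @ theta - y))
    return theta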
Example #5
def test_cost(self):
    th = np.ones((1 + self.nf,))
    self.assertAlmostEqual(303.993,
                           linreg.cost(th, self.X, self.y, lda=1),
                           places=3)
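The expected 303.993 comes from whatever dataset the test fixture loads (the setUp is not shown). The same formula can be checked by hand on tiny data; a sketch assuming linreg.cost implements the standard regularized cost with the intercept excluded from the penalty:

import numpy as np
import linreg  # module under test, assumed from the snippets above

def test_cost_by_hand():
    X = np.array([[1.0, 2.0],
                  [1.0, 3.0]])   # intercept column already prepended
    y = np.array([2.0, 3.0])
    th = np.ones(2)
    # residuals are both 1, so J = (1 + 1) / (2*2) = 0.5;
    # penalty is lda/(2m) * th[1]**2 = 1/4, for 0.75 in total
    assert abs(linreg.cost(th, X, y, lda=1) - 0.75) < 1e-12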