Example #1
def learning_curve(X, y, Xval, yval, lmd):
    # Number of training examples
    m = X.shape[0]

    # You need to return these values correctly
    error_train = []
    error_val = []

    # ===================== Your Code Here =====================
    # Instructions : Fill in this function to return training errors in
    #                error_train and the cross validation errors in error_val.
    #                i.e., error_train[i] and error_val[i] should give you
    #                the errors obtained after training on i examples
    #
    # Note : You should evaluate the training error on the first i training
    #        examples (i.e. X[:i] and y[:i])
    #
    #        For the cross-validation error, you should instead evaluate on
    #        the _entire_ cross validation set (Xval and yval).
    #
    # Note : If you're using your cost function (linear_reg_cost_function)
    #        to compute the training and cross validation error, you should
    #        call the function with the lambda argument set to 0.
    #        Do note that you will still need to use lambda when running the
    #        training to obtain the theta parameters.
    #
    for i in range(1, m + 1):
        Xi = X[:i, :]
        yi = y[:i, :]
        theta = tlr.train_linear_reg(Xi, yi, lmd)
        # Errors are evaluated with lambda = 0; lmd is only used for training
        error_train.append(lrcf.linear_reg_cost_function(theta, Xi, yi, 0))
        error_val.append(lrcf.linear_reg_cost_function(theta, Xval, yval, 0))
    # ==========================================================
    return error_train, error_val
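
Every example on this page delegates to two helpers from the exercise's own modules, linear_reg_cost_function (imported as lrcf in several snippets) and train_linear_reg (tlr), whose definitions are not shown. Below is a minimal sketch of the cost function, assuming the theta-first calling convention of Examples #1, #9 and #15 and a 1-D target vector; other snippets (e.g. Examples #5, #10, #11 and #13) pass X and y first instead.

import numpy as np


def linear_reg_cost_function(theta, X, y, lmd):
    # Regularized linear regression cost and gradient.
    # X is assumed to already include the bias column of ones.
    m = y.size
    residual = X.dot(theta) - y

    # Squared-error term plus an L2 penalty on every parameter except the bias
    cost = residual.dot(residual) / (2 * m) \
        + lmd / (2 * m) * np.sum(theta[1:] ** 2)

    # Gradient of the cost; the bias parameter is not regularized
    grad = X.T.dot(residual) / m
    grad[1:] += lmd / m * theta[1:]

    return cost, grad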
Example #2
def validation_curve(X, y, Xval, yval):
    """
    generate a cross validation curve
    """
    # Selected values of lambda (don't change this)
    lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])

    # You need to return these variables correctly.
    error_train = np.zeros(lambda_vec.size)
    error_val = np.zeros(lambda_vec.size)

    # ===================== Your Code Here =====================
    # Instructions : Fill in this function to return training errors in
    #                error_train and the validation errors in error_val. The
    #                vector lambda_vec contains the different lambda parameters
    #                to use for each calculation of the errors, i.e.,
    #                error_train[i], and error_val[i] should give
    #                you the errors obtained after training with
    #                lmd = lambda_vec[i]
    for i in range(lambda_vec.size):
        lmd = lambda_vec[i]
        theta = train_linear_reg(X, y, lmd)
        error_train[i] = linear_reg_cost_function(theta, X, y, 0)[0]
        error_val[i] = linear_reg_cost_function(theta, Xval, yval, 0)[0]

    # ==========================================================

    return lambda_vec, error_train, error_val
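
A typical way to consume the returned triple is to plot both error vectors against lambda and pick the value with the lowest cross-validation error. A hedged usage sketch, assuming X, y, Xval and yval are already loaded (with the bias column, if the underlying helpers expect one):

import matplotlib.pyplot as plt

lambda_vec, error_train, error_val = validation_curve(X, y, Xval, yval)

plt.plot(lambda_vec, error_train, label='Train')
plt.plot(lambda_vec, error_val, label='Cross Validation')
plt.xlabel('lambda')
plt.ylabel('Error')
plt.legend()
plt.show()

best_lambda = lambda_vec[error_val.argmin()]  # lambda with the lowest CV error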
Example #3
def validation_curve(X, y, Xval, yval):
    # Selected values of lambda (don't change this)
    lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])

    # You need to return these variables correctly.
    error_train = []
    error_val = []

    # ===================== Your Code Here =====================
    # Instructions : Fill in this function to return training errors in
    #                error_train and the validation errors in error_val. The
    #                vector lambda_vec contains the different lambda parameters
    #                to use for each calculation of the errors, i.e.,
    #                error_train[i], and error_val[i] should give
    #                you the errors obtained after training with
    #                lmd = lambda_vec[i]
    #
    for lmd in lambda_vec:
        theta_l = tlr.train_linear_reg(X, y, lmd)
        # Errors are evaluated with lambda = 0; lmd is only used for training
        error_train.append(lrcf.linear_reg_cost_function(theta_l, X, y, 0))
        error_val.append(
            lrcf.linear_reg_cost_function(theta_l, Xval, yval, 0))

    # ==========================================================

    return lambda_vec, error_train, error_val
Example #4
def validation_curve(X, y, Xval, yval):
    # Try a range of lambda values
    lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])
    error_train = np.zeros(lambda_vec.size)
    error_val = np.zeros(lambda_vec.size)
    i = 0
    for lmd in lambda_vec:
        print(lmd)
        theta = tlr.train_linear_reg(X, y, lmd)
        # Note: errors are computed with lambda = 0
        error_train[i] = lrcf.linear_reg_cost_function(theta, X, y, 0)[0]
        error_val[i] = lrcf.linear_reg_cost_function(theta, Xval, yval, 0)[0]
        i += 1
    print(error_train)
    return lambda_vec, error_train, error_val
Example #5
def validation_curve(X, y, Xval, yval):
    lambda_vec = np.array([0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])

    error_train = np.zeros(lambda_vec.size)
    error_val = np.zeros(lambda_vec.size)

    for i in range(lambda_vec.size):
        lmd = lambda_vec[i]
        theta = train_linear_reg(X, y, lmd)

        error_train[i] = linear_reg_cost_function(X, y, theta, 0)[0]
        error_val[i] = linear_reg_cost_function(Xval, yval, theta, 0)[0]

    return lambda_vec, error_train, error_val
Example #6
def learning_curve(X, y, Xval, yval, lmd):

    m = X.shape[0]  # number of training examples

    error_train = np.zeros(m)  # training error for each training-set size
    error_val = np.zeros(m)  # validation error for each training-set size

    for i in range(m):
        x = X[:i + 1, :]
        y1 = y[:i + 1]
        theta = tlr.train_linear_reg(x, y1, lmd)
        # Errors are evaluated with lambda = 0; lmd is only used for training
        error_train[i] = lrcf.linear_reg_cost_function(theta, x, y1, 0)[0]
        error_val[i] = lrcf.linear_reg_cost_function(theta, Xval, yval, 0)[0]
    return error_train, error_val
Example #7
def learning_curve(X, y, Xval, yval, lmd):
    m = X.shape[0]

    error_train = np.zeros(m)
    error_val = np.zeros(m)

    for i in range(1, m + 1):
        X_train = X[:i]
        y_train = y[:i]
        theta = train_linear_reg(X_train, y_train, lmd)
        error_train[i - 1] = linear_reg_cost_function(X_train, y_train, theta,
                                                      0)[0]
        error_val[i - 1] = linear_reg_cost_function(Xval, yval, theta, 0)[0]

    return error_train, error_val
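
As with the validation curve, the learning-curve output is usually plotted against the number of training examples. A hedged driver sketch, assuming pre-loaded X, y, Xval, yval and lambda = 0, as in the course's first learning-curve plot:

import numpy as np
import matplotlib.pyplot as plt

m = X.shape[0]
error_train, error_val = learning_curve(X, y, Xval, yval, 0)

plt.plot(np.arange(1, m + 1), error_train, label='Train')
plt.plot(np.arange(1, m + 1), error_val, label='Cross Validation')
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.legend()
plt.show()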
Example #8
def validation_curve(X, y, Xval, yval):
    # Try a range of lambda values
    lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])

    # For each lambda, train and record the training and validation errors
    error_train = np.zeros(lambda_vec.size)
    error_val = np.zeros(lambda_vec.size)

    i = 0
    for lmd in lambda_vec:
        theta = tlr.train_linear_reg(X, y, lmd)
        # Note: errors are computed with lambda = 0
        error_train[i] = lrcf.linear_reg_cost_function(theta, X, y, 0)[0]
        error_val[i] = lrcf.linear_reg_cost_function(theta, Xval, yval, 0)[0]
        i += 1

    return lambda_vec, error_train, error_val
Example #9
def learning_curve(X, y, Xval, yval, lmd):
    # Number of training examples
    m = X.shape[0]

    # You need to return these values correctly
    error_train = np.zeros(m)
    error_val = np.zeros(m)

    # ===================== Your Code Here =====================
    # Instructions : Fill in this function to return training errors in
    #                error_train and the cross validation errors in error_val.
    #                i.e., error_train[i] and error_val[i] should give you
    #                the errors obtained after training on i examples
    #
    # Note : You should evaluate the training error on the first i training
    #        examples (i.e. X[:i] and y[:i])
    #
    #        For the cross-validation error, you should instead evaluate on
    #        the _entire_ cross validation set (Xval and yval).
    #
    # Note : If you're using your cost function (linear_reg_cost_function)
    #        to compute the training and cross validation error, you should
    #        call the function with the lambda argument set to 0.
    #        Do note that you will still need to use lambda when running the
    #        training to obtain the theta parameters.
    #
    # ==========================================================
    for i in range(1, m + 1):
        theta = tlr.train_linear_reg(X[:i, :], y[:i], lmd)
        """
        error_train[i-1] = np.sum((np.matmul(X[:i,:],theta) - y[:i]) ** 2)/(2*i)
        error_val[i-1] = np.sum((np.matmul(Xval,theta) - yval) ** 2)/(2*Xval.shape[0])
        """
        error_train[i - 1], _ = lrcf.linear_reg_cost_function(theta,
                                                              X[:i, :],
                                                              y[:i],
                                                              lmd=0)
        error_val[i - 1], _ = lrcf.linear_reg_cost_function(theta,
                                                            Xval,
                                                            yval,
                                                            lmd=0)

    return error_train, error_val
Example #10
def learning_curve(x, y, xval, yval, curve_lambda):
    m = x.shape[0]
    m_xval = xval.shape[0]

    error_train = np.zeros((m, 1))
    error_val = np.zeros((m, 1))

    x = np.append(np.ones((m, 1)), x, axis=1)
    xval = np.append(np.ones((m_xval, 1)), xval, axis=1)
    for i in range(m):
        # compute parameter theta
        result = train_linear_reg(x[0:i + 1], y[0:i + 1], curve_lambda)
        theta = result['x']

        # compute training error
        error_train[i] = linear_reg_cost_function(x[0:i + 1], y[0:i + 1],
                                                  theta, 0)[0]

        # compute cross validation error
        error_val[i] = linear_reg_cost_function(xval, yval, theta, 0)[0]
    return error_train, error_val
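
Design note: unlike Examples #1, #6 and #7, this variant prepends the bias column inside learning_curve itself, so callers pass the raw feature matrices, and it unpacks theta from a scipy.optimize-style result mapping (result['x']) instead of receiving the parameter vector directly.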
Example #11
def validation_curve(x, y, xval, yval):
    # Selected values of lambda (you should not change this)
    lambda_vec = np.array(([0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]))
    len_of_vec = len(lambda_vec)
    # You need to return these variables correctly.
    error_train = np.zeros((len_of_vec, 1))
    error_val = np.zeros((len_of_vec, 1))

    for i in range(len_of_vec):
        lambda_temp = lambda_vec[i]

        # compute parameter theta (learning)
        result = train_linear_reg(x, y, lambda_temp)
        theta = result['x']

        # compute training error
        j, grad = linear_reg_cost_function(x, y, theta, 0)
        error_train[i] = j

        # compute cross validation error
        j, grad = linear_reg_cost_function(xval, yval, theta, 0)
        error_val[i] = j
    return lambda_vec, error_train, error_val
Example #12
 def grad_func(t):  # compute the gradient
     return lrcf.linear_reg_cost_function(t, x, y, lmd)[1]
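
Examples #12 and #19 are closures normally defined inside train_linear_reg so that an off-the-shelf minimizer only sees a function of theta. A minimal sketch of that wrapper, assuming scipy.optimize.minimize with conjugate gradient (the optimizer choice, iteration cap and zero initialization are assumptions; Examples #10 and #11 instead return the whole result object and index result['x']):

import numpy as np
import scipy.optimize as opt


def train_linear_reg(x, y, lmd):
    def cost_func(t):
        return lrcf.linear_reg_cost_function(t, x, y, lmd)[0]

    def grad_func(t):
        return lrcf.linear_reg_cost_function(t, x, y, lmd)[1]

    initial_theta = np.zeros(x.shape[1])
    result = opt.minimize(cost_func, initial_theta, jac=grad_func,
                          method='CG', options={'maxiter': 200})
    return result.x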
Example #13
X, y, Xval, yval, Xtest, ytest = data['X'], data['y'].flatten(), \
                                 data['Xval'], data['yval'].flatten(), \
                                 data['Xtest'], data['ytest'].flatten()

# m = Number of examples
m = X.shape[0]

# Plot training data
plt.plot(X, y, 'rx', linewidth=1.5)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()

input('Program paused. Press enter to continue.\n')

# =========== Part 2: Regularized Linear Regression Cost =============
theta = np.array([1, 1])
J, _ = linear_reg_cost_function(np.c_[np.ones(m), X], y, theta, 1)

print(
    'Cost at theta = [1 ; 1]: {} \n(this value should be about 303.993192)\n'.
    format(J))

input('Program paused. Press enter to continue.\n')

# =========== Part 3: Regularized Linear Regression Gradient =============
theta = np.array([1, 1])
J, grad = linear_reg_cost_function(np.c_[np.ones(m), X], y, theta, 1)

print(
    'Gradient at theta = [1 ; 1]:  [{}; {}] \n(this value should be about [-15.303016; 598.250744])\n'
    .format(grad[0], grad[1]))
Example #14
def cost_function(theta, x, y, cf_lambda):
    j, new_grad = linear_reg_cost_function(x, y, theta, cf_lambda)
    global grad
    grad = new_grad
    return j
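
Example #14 caches the gradient in a module-level grad as a side effect of evaluating the cost, so an optimizer that asks for the cost and the gradient through separate callbacks does not run the computation twice. A hedged sketch of the companion gradient callback (the name grad_function is hypothetical, and the pattern assumes the optimizer always evaluates the cost before requesting the gradient at the same theta):

def grad_function(theta, x, y, cf_lambda):
    # Return the gradient cached by the most recent cost_function call
    return grad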
Example #15
m = y.size

# Plot training data
plt.figure()
plt.scatter(X, y, c='r', marker="x")
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')

input('Program paused. Press ENTER to continue')

# ===================== Part 2: Regularized Linear Regression Cost =====================
# You should now implement the cost function for regularized linear regression
#

theta = np.ones(2)
cost, _ = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1)

print(
    'Cost at theta = [1  1]: {:0.6f}\n(this value should be about 303.993192)'.
    format(cost))

input('Program paused. Press ENTER to continue')

# ===================== Part 3: Regularized Linear Regression Gradient =====================
# You should now implement the gradient for regularized linear regression
#

theta = np.ones(2)
cost, grad = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1)

print(
    'Gradient at theta = [1  1]: {}\n(this value should be about [-15.303016  598.250744])'
    .format(grad))
Example #16
yval = data['yval'].flatten()  # validation-set targets, flattened to a 1-D array
Xtest = data['Xtest']  # raw test-set input features
ytest = data['ytest'].flatten()  # test-set targets, flattened to a 1-D array
m = y.size  # number of training examples

# Visualize the training set
plt.figure()
plt.scatter(X, y, c='r', marker="x")
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')

input('Program paused. Press ENTER to continue')
'''Part 2-1: Implement the cost function for regularized linear regression'''

theta = np.ones(2)  # initialize parameters to 1 (one raw input feature, so two parameters)
cost, _ = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y,
                                        1)  # prepend a column of ones; lambda = 1

# Compare the computed cost with the expected value to verify correctness
print(
    'Cost at theta = [1  1]: {:0.6f}\n(this value should be about 303.993192)'.
    format(cost))
'''Part 2-2: Compute the gradient for regularized linear regression'''

theta = np.ones(2)  # initialize parameters to 1 (one raw input feature, so two parameters)
cost, grad = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y,
                                           1)  # prepend a column of ones; lambda = 1

# Compare the computed gradient with the expected values to verify correctness
print(
    'Gradient at theta = [1  1]: {}\n(this value should be about [-15.303016  598.250744])'
    .format(grad))
Example #17
 def cost_function(t):
     return linear_reg_cost_function(X, y, t, lmd)[0]
Example #18
 def gradient(t):
     return linear_reg_cost_function(X, y, t, lmd)[1]
Example #19
 def cost_func(t):
     return lrcf.linear_reg_cost_function(t, x, y, lmd)[0]
Example #20
    m = X.shape[0]
    # Plot training data
    plt.ion()
    plt.figure()
    plt.plot(X, y, 'rx', markersize=10)
    plt.xlabel('Change in water level (x)')
    plt.ylabel('Water flowing out of the dam (y)')
    plt.axis([-60, 40, 0, 40])
    plt.pause(2)
    plt.close()
    print('Program paused. Press enter to continue.\n')
    # pause_func()

    # =========== Part 2: Regularized Linear Regression Cost =============
    theta = np.array([[1], [1]])
    J, grad = linear_reg_cost_function(np.append(np.ones((m, 1)), X, axis=1),
                                       y, theta, 1)

    print(
        'Cost at theta = [1 ; 1]: %f \n(this value should be about 303.993192)\n'
        % J)
    print('Program paused. Press enter to continue.\n')
    # pause_func()

    # =========== Part 3: Regularized Linear Regression Gradient =============
    print(
        'Gradient at theta = [1 ; 1]:  [%f; %f] \n(this value should be about [-15.303016; 598.250744])\n'
        % (grad[0], grad[1]))
    print('Program paused. Press enter to continue.\n')

    # =========== Part 4: Train Linear Regression =============
    # Write Up Note: The data is non-linear, so this will not give a great fit.
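    # The snippet is cut off here. In the course exercise, Part 4 trains with
    # lambda = 0 and overlays the fitted line on the training data; a hedged
    # continuation (train_linear_reg is assumed to return theta directly):
    X_aug = np.append(np.ones((m, 1)), X, axis=1)
    theta = train_linear_reg(X_aug, y, 0)

    plt.plot(X, y, 'rx', markersize=10)
    plt.plot(X, X_aug.dot(theta), '--', linewidth=2)
    plt.xlabel('Change in water level (x)')
    plt.ylabel('Water flowing out of the dam (y)')
    plt.show()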