def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    for i in range(1, num_examples + 1):
        error_t = []
        error_v = []
        for _ in range(50):
            # draw i random examples; assumes Xval has at least num_examples rows
            arr = np.random.permutation(num_examples)[:i]
            Xi, yi = X[arr], y[arr]
            Xvali, yvali = Xval[arr], yval[arr]
            model = RegularizedLinearReg_SquaredLoss()
            theta = model.train(Xi, yi, reg=reg, num_iters=1000)
            # errors are reported without the regularization term
            error_t.append(model.loss(theta, Xi, yi, 0))
            error_v.append(model.loss(theta, Xvali, yvali, 0))
        error_train[i - 1] = np.mean(error_t)
        error_val[i - 1] = np.mean(error_v)

    ###########################################################################
    return error_train, error_val
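
# A typical way to consume any of these learning-curve variants, sketched under
# the assumption that the bias-augmented design matrices XX and XXval have
# already been built as in the script excerpts near the end of this page:

reg = 1.0
error_train, error_val = averaged_learning_curve(XX, y, XXval, yval, reg)
for m, (et, ev) in enumerate(zip(error_train, error_val), start=1):
    print('%2d examples: train %.4f  val %.4f' % (m, et, ev))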
Example #2
File: utils.py Project: CryoSky/ML
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################

    for i in range(num_examples):
        error_train_sum = 0
        error_val_sum = 0
        for iter in range(50):
            reglinear_reg5 = RegularizedLinearReg_SquaredLoss()
            # sample i + 1 distinct indices for the train and validation subsets
            train_sample = np.random.choice(num_examples, i + 1, replace=False)
            val_sample = np.random.choice(Xval.shape[0], i + 1, replace=False)
            theta_opt0 = reglinear_reg5.train(X[train_sample],
                                              y[train_sample],
                                              reg,
                                              num_iters=1000)
            error_train_sum += reglinear_reg5.loss(theta_opt0, X[train_sample],
                                                   y[train_sample], 0)
            error_val_sum += reglinear_reg5.loss(theta_opt0, Xval[val_sample],
                                                 yval[val_sample], 0)
        error_train[i] = error_train_sum / 50
        error_val[i] = error_val_sum / 50

    ###########################################################################
    return error_train, error_val
Example #3
def averaged_learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    iters = 50
    error_train0 = np.zeros((iters,))
    error_val0 = np.zeros((iters,))
    model = RegularizedLinearReg_SquaredLoss()
    for i in range(1, num_examples + 1):
        for j in range(iters):
            # sample i examples without replacement; the validation subset
            # reuses the same index set, which assumes Xval has at least
            # num_examples rows
            shuffled_array = np.random.permutation(num_examples)
            selected = shuffled_array[0:i]
            X_train, y_train = X[selected, :], y[selected]
            theta = model.train(X_train, y_train, reg, num_iters=1000)
            X_val, y_val = Xval[selected, :], yval[selected]
            error_train0[j] = model.loss(theta, X_train, y_train, 0)
            error_val0[j] = model.loss(theta, X_val, y_val, 0)
        error_train[i - 1] = np.mean(error_train0)
        error_val[i - 1] = np.mean(error_val0)


    ###########################################################################
    return error_train, error_val
Example #4
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    for i in range(num_examples):
        for j in range(50):
            # sample i + 1 distinct training indices
            random_items = random.sample(population=range(num_examples),
                                         k=i + 1)
            reglinear_reg = RegularizedLinearReg_SquaredLoss()
            theta_opt = reglinear_reg.train(X[random_items, :],
                                            y[random_items],
                                            reg=reg,
                                            num_iters=1000)
            error_train[i] += reglinear_reg.loss(theta_opt, X[random_items, :],
                                                 y[random_items], 0) / 50.
            error_val[i] += reglinear_reg.loss(theta_opt, Xval, yval, 0) / 50.

    ###########################################################################
    return error_train, error_val
Example #5
def averaged_learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################

    n = 50
    reglinear_reg2 = RegularizedLinearReg_SquaredLoss()
    for i in range(0, num_examples):
        for j in range(0, n):
            # sample without replacement; np.random.choice would sample
            # with replacement by default, so permute and slice instead
            samples = np.random.permutation(num_examples)
            train_set = samples[0:i+1]

            X_train = X[train_set, :]
            y_train = y[train_set]
            X_val = Xval[train_set, :]
            y_val = yval[train_set]
            
            op_theta = reglinear_reg2.train(X_train, y_train, reg = reg, num_iters = 3000)    
            error_train[i] += reglinear_reg2.loss(op_theta, X_train, y_train, 0) / n
            error_val[i] += reglinear_reg2.loss(op_theta, X_val, y_val, 0) / n
            

    ###########################################################################
    return error_train, error_val
Example #6
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    total_repeat = 50
    reglinear_reg1 = RegularizedLinearReg_SquaredLoss()

    for i in range(num_examples):
        for count in range(total_repeat):
            index = random.sample(range(num_examples), i + 1)
            theta = reglinear_reg1.train(X[index, :],
                                         y[index],
                                         reg=reg,
                                         num_iters=10000)
            error_train[i] += reglinear_reg1.loss(theta, X[index, :], y[index],
                                                  0.0)
            error_val[i] += reglinear_reg1.loss(theta, Xval[index, :],
                                                yval[index], 0.0)
        error_train[i] /= total_repeat
        error_val[i] /= total_repeat

    ###########################################################################
    return error_train, error_val
Example #7
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################

    #print(X.shape)
    reglinear_reg = RegularizedLinearReg_SquaredLoss()

    for i in range(0, num_examples):
        for j in range(50):
            rand1 = np.random.choice(X.shape[0], i + 1, replace=False)
            theta = reglinear_reg.train(X[rand1],
                                        y[rand1],
                                        reg,
                                        num_iters=1000)
            error_train[i] += np.sum(
                np.square(np.dot(X[rand1], theta) - y[rand1])) / (2 * (i + 1))
            error_val[i] += np.sum(
                np.square(np.dot(Xval[rand1], theta) -
                          yval[rand1])) / (2 * (i + 1))
        error_train[i] = error_train[i] / 50
        error_val[i] = error_val[i] / 50
    ###########################################################################
    return error_train, error_val
Example #8
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################

    for i in range(num_examples):
        error_train_vec = []
        error_val_vec = []
        for count in range(50):
            # shuffle all indices and keep the first i + 1
            index = np.random.permutation(num_examples)[:i + 1]
            X_i = X[index]
            y_i = y[index]
            Xval_i = Xval[index]
            yval_i = yval[index]
            reglinear_reg_alc = RegularizedLinearReg_SquaredLoss()
            theta_i = reglinear_reg_alc.train(X_i, y_i, reg, num_iters=1000)
            error_train_vec.append(reglinear_reg_alc.loss(
                theta_i, X_i, y_i, 0))
            error_val_vec.append(
                reglinear_reg_alc.loss(theta_i, Xval_i, yval_i, 0))
        error_train[i] = np.mean(error_train_vec)
        error_val[i] = np.mean(error_val_vec)

    ###########################################################################
    return error_train, error_val
Example #9
def averaged_learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    repeat = 50
    error_train_random = np.zeros((num_examples, repeat))
    error_val_random = np.zeros((num_examples, repeat))
    rlr = RegularizedLinearReg_SquaredLoss()
    for i in range(1, num_examples+1):
        for j in range(repeat):
            train_random = np.random.permutation(X.shape[0])
            train_random = train_random[:i]
            X_random = X[train_random,:]
            y_random = y[train_random]
            val_random = np.random.permutation(Xval.shape[0])
            val_random = val_random[:i]         
            Xval_random = Xval[val_random,:]
            yval_random = yval[val_random]              
            best_theta = rlr.train(X_random, y_random, reg)
            error_train_random[i-1, j] = rlr.loss(best_theta, X_random, y_random, 0)
            error_val_random[i-1, j] = rlr.loss(best_theta, Xval_random, yval_random, 0)

    error_train = np.mean(error_train_random, axis = 1)
    error_val = np.mean(error_val_random, axis = 1)

    ###########################################################################
    return error_train, error_val
Example #10
def validation_curve(X, y, Xval, yval, reg_vec):

    #reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec), ))
    error_val = np.zeros((len(reg_vec), ))

    for i in range(len(reg_vec)):
        reglinear_reg = RegularizedLinearReg_SquaredLoss()
        theta = reglinear_reg.train(X, y, reg_vec[i], num_iters=1000)
        error_train[i] = reglinear_reg.loss(theta, X, y, 0.0)
        error_val[i] = reglinear_reg.loss(theta, Xval, yval, 0.0)

    return reg_vec, error_train, error_val
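
# Whichever validation_curve variant is used, its output is typically consumed
# the same way: pick the lambda minimizing the (unregularized) validation
# error. A minimal sketch, assuming the four-argument signature used below:

import numpy as np

reg_vec, error_train, error_val = validation_curve(XX, y, XXval, yval)
best_reg = reg_vec[np.argmin(error_val)]  # lambda with the lowest validation error
print('best lambda = %s (val error %.4f)' % (best_reg, np.min(error_val)))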
Example #11
def validation_curve(X,y,Xval,yval):
    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec),))
    error_val = np.zeros((len(reg_vec),))
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    R = RegularizedLinearReg_SquaredLoss()
    for i, reg in enumerate(reg_vec):
        best_theta = R.train(X, y, reg)
        error_train[i] = R.loss(best_theta, X, y, 0)
        error_val[i] = R.loss(best_theta, Xval, yval, 0)
    return reg_vec, error_train, error_val
Example #12
def validation_curve(X,y,Xval,yval):
    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    # reg_vec = [0, 0.1, 0.3, 1, 3, 10, 20, 30, 100, 300, 1000]
    error_train = np.zeros((len(reg_vec),))
    error_val = np.zeros((len(reg_vec),))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    for i, reg in enumerate(reg_vec):
        model = RegularizedLinearReg_SquaredLoss()
        theta = model.train(X, y, reg, num_iters=1000)
        error_train[i] = model.loss(theta, X, y, 0)
        error_val[i] = model.loss(theta, Xval, yval, 0)
    return reg_vec, error_train, error_val
Example #13
def validation_curve(X, y, Xval, yval):
    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec), ))
    error_val = np.zeros((len(reg_vec), ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    reglinear_reg_vc = RegularizedLinearReg_SquaredLoss()
    for i in range(len(reg_vec)):
        theta_i = reglinear_reg_vc.train(X, y, reg_vec[i], num_iters=1000)
        error_train[i] = reglinear_reg_vc.loss(theta_i, X, y, 0)
        error_val[i] = reglinear_reg_vc.loss(theta_i, Xval, yval, 0)

    return reg_vec, error_train, error_val
Example #14
def learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))
    
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 7 lines of code expected                                                #
    ###########################################################################
    for num_train in range(num_examples):
      reglinear_reg = RegularizedLinearReg_SquaredLoss()
      theta = reglinear_reg.train(X[:num_train+1],y[:num_train+1],reg=reg,num_iters=1000)
      error_train[num_train] = reglinear_reg.loss(theta,X[:num_train+1],y[:num_train+1],0.0)
      error_val[num_train] = reglinear_reg.loss(theta,Xval,yval,0.0)
    ###########################################################################

    return error_train, error_val
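
# Reading the curves: if error_train and error_val are both high and close
# together at the largest training-set size, the model is underfitting (high
# bias); a large gap between them suggests overfitting (high variance). A
# small sketch of that check, with XX, XXval, and reg assumed as above:

error_train, error_val = learning_curve(XX, y, XXval, yval, reg)
print('final train error: %.4f' % error_train[-1])
print('final val error:   %.4f' % error_val[-1])
print('gap (val - train): %.4f' % (error_val[-1] - error_train[-1]))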
Example #15
def validation_curve(X,y,Xval,yval):
    
    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec),))
    error_val = np.zeros((len(reg_vec),))
  
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    for i in range(len(reg_vec)):
        reglinear_reg1 = RegularizedLinearReg_SquaredLoss()
        op_theta = reglinear_reg1.train(X, y, reg = reg_vec[i], num_iters = 3000)
        error_train[i] = np.dot((np.dot(X, op_theta) - y).T, np.dot(X, op_theta) - y) / (2 * X.shape[0])
        error_val[i] = np.dot((np.dot(Xval, op_theta) - yval).T, np.dot(Xval, op_theta) - yval) / (2 * Xval.shape[0]) 

    return reg_vec, error_train, error_val
Example #16
def learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))
    
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 7 lines of code expected                                                #
    ###########################################################################
    for i in range(1, num_examples + 1):
        model = RegularizedLinearReg_SquaredLoss()
        theta = model.train(X[:i], y[:i], reg=reg, num_iters=1000)
        error_train[i-1] = model.loss(theta, X[:i], y[:i], 0)
        error_val[i-1] = model.loss(theta, Xval, yval, 0)
    ###########################################################################

    return error_train, error_val
Example #17
def learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))
    
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 7 lines of code expected                                                #
    ###########################################################################
    R = RegularizedLinearReg_SquaredLoss()
    for i in range(num_examples):
        best_theta = R.train(X[:i+1,:], y[0:i+1], reg)
        error_train[i] = R.loss(best_theta, X[0:i+1,:], y[0:i+1], 0)
        error_val[i] = R.loss(best_theta, Xval, yval, 0)

    ###########################################################################

    return error_train, error_val
Example #18
def validation_curve(X, y, Xval, yval):
    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec), ))
    error_val = np.zeros((len(reg_vec), ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    reglinear_reg = RegularizedLinearReg_SquaredLoss()
    for i in range(len(reg_vec)):
        theta = reglinear_reg.train(X, y, reg=reg_vec[i], num_iters=1000)
        error_train[i] = np.sum(np.square(np.dot(X, theta) - y)) / (
            2 * X.shape[0]
        )  #+ reg_vec[i]/(2*X.shape[0])*np.sum(np.square(theta))
        error_val[i] = np.sum(np.square(np.dot(Xval, theta) - yval)) / (
            2 * Xval.shape[0]
        )  #+ reg_vec[i]/(2*Xval.shape[0])*np.sum(np.square(theta))
    return reg_vec, error_train, error_val
Example #19
def learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))
    
    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 7 lines of code expected                                                #
    ###########################################################################

    for i in range(num_examples):
        reglinear_reg = RegularizedLinearReg_SquaredLoss()
        op_theta = reglinear_reg.train(X[: i+1], y[: i+1], reg = reg, num_iters = 3000)
        error_train[i] = np.dot((np.dot(X[: i+1], op_theta) - y[: i+1]).T, np.dot(X[: i+1], op_theta) - y[: i+1]) / (2 * (i + 1))
        error_val[i] = np.dot((np.dot(Xval, op_theta) - yval).T, np.dot(Xval, op_theta) - yval) / (2 * Xval.shape[0])

    ###########################################################################

    return error_train, error_val
Example #20
def validation_curve(X, y, Xval, yval):

    reg_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
    error_train = np.zeros((len(reg_vec), ))
    error_val = np.zeros((len(reg_vec), ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 5 lines of code expected                                                #
    ###########################################################################
    RegularizedLinearReg = RegularizedLinearReg_SquaredLoss()
    for i in range(len(reg_vec)):
        optimal_theta = RegularizedLinearReg.train(X,
                                                   y,
                                                   reg=reg_vec[i],
                                                   num_iters=1000)
        # errors are reported without the regularization term
        error_train[i] = RegularizedLinearReg.loss(optimal_theta, X, y, 0.0)
        error_val[i] = RegularizedLinearReg.loss(optimal_theta, Xval, yval, 0.0)

    return reg_vec, error_train, error_val
Example #21
def learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 7 lines of code expected                                                #
    ###########################################################################

    reglinear_reg_lc = RegularizedLinearReg_SquaredLoss()
    for i in range(num_examples):
        X_i = X[:i + 1]
        y_i = y[:i + 1]
        #theta_i = reglinear_reg_lc.train(X_i,y_i,reg=0.0,num_iters=1000)
        theta_i = reglinear_reg_lc.train(X_i, y_i, reg, num_iters=1000)
        error_train[i] = reglinear_reg_lc.loss(theta_i, X_i, y_i, 0)
        error_val[i] = reglinear_reg_lc.loss(theta_i, Xval, yval, 0)

    ###########################################################################

    return error_train, error_val
Example #22
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    index = [x for x in range(num_examples)]
    RegularizedLinearReg = RegularizedLinearReg_SquaredLoss()
    for i in range(num_examples):
        tmp1, tmp2 = 0, 0
        for _ in range(50):
            random.shuffle(index)
            index_1 = index[:i + 1]
            X_shuffle = X[index_1]
            y_shuffle = y[index_1]
            random.shuffle(index)
            index_2 = index[:i + 1]
            Xval_shuffle = Xval[index_2]
            yval_shuffle = yval[index_2]
            optimal_theta = RegularizedLinearReg.train(X_shuffle,
                                                       y_shuffle,
                                                       reg,
                                                       num_iters=1000)
            # errors are reported without the regularization term
            tmp1 = tmp1 + RegularizedLinearReg.loss(optimal_theta, X_shuffle,
                                                    y_shuffle, 0)
            tmp2 = tmp2 + RegularizedLinearReg.loss(
                optimal_theta, Xval_shuffle, yval_shuffle, 0)

        error_train[i] = tmp1 / 50
        error_val[i] = tmp2 / 50

    ###########################################################################
    return error_train, error_val
Example #23
def averaged_learning_curve(X, y, Xval, yval, reg):
    num_examples, dim = X.shape
    error_train = np.zeros((num_examples, ))
    error_val = np.zeros((num_examples, ))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    repeated_times = 50
    for i in range(num_examples):
        for j in range(repeated_times):
            train_idx = random.sample(range(0, num_examples), i + 1)
            val_idx = random.sample(range(0, num_examples), i + 1)
            reglinear_reg = RegularizedLinearReg_SquaredLoss()
            theta = reglinear_reg.train(X[train_idx], y[train_idx], reg)
            error_train[i] += reglinear_reg.loss(theta, X[train_idx],
                                                 y[train_idx], 0.0)
            error_val[i] += reglinear_reg.loss(theta, Xval[val_idx],
                                               yval[val_idx], 0.0)
        error_train[i] /= repeated_times
        error_val[i] /= repeated_times
    ###########################################################################
    return error_train, error_val
Example #24
def averaged_learning_curve(X,y,Xval,yval,reg):
    num_examples,dim = X.shape
    error_train = np.zeros((num_examples,))
    error_val = np.zeros((num_examples,))

    ###########################################################################
    # TODO: compute error_train and error_val                                 #
    # 10-12 lines of code expected                                            #
    ###########################################################################
    repeat = 50
    for num_train in range(num_examples):
      random_train = list(range(0, num_examples))
      random_val = list(range(0, Xval.shape[0]))
      for idx in range(repeat):
        random.shuffle(random_train)
        random.shuffle(random_val)
        reglinear_reg = RegularizedLinearReg_SquaredLoss()
        theta = reglinear_reg.train(X[random_train[:num_train+1]],y[random_train[:num_train+1]],reg=reg,num_iters=1000)
        error_train[num_train] += reglinear_reg.loss(theta,X[random_train[:num_train+1]],y[random_train[:num_train+1]],0.0)
        # match the training subset size on the validation side
        error_val[num_train] += reglinear_reg.loss(theta,Xval[random_val[:num_train+1]],yval[random_val[:num_train+1]],0.0)
      error_train[num_train] /= repeat
      error_val[num_train] /= repeat
    ###########################################################################
    return error_train, error_val
Example #25
plot_utils.plot_data(X,y,'Change in water level (x)','Water flowing out of the dam (y)')
plt.savefig('fig6.pdf')

########################################################################
## =========== Part 2: Regularized Linear Regression ==================#
########################################################################
#  You should now implement the loss function and gradient of the
# loss function for regularized linear regression in reg_linear_regression_multi.py
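
# For reference, the quantity being implemented is
#   J(theta) = (1/2m) * ||X.theta - y||^2 + (reg/2m) * ||theta[1:]||^2,
# with the bias weight left unregularized. A minimal NumPy sketch of the loss
# and its gradient (an illustration only, not the project's actual
# reg_linear_regression_multi.py, whose conventions may differ):

import numpy as np

def reg_squared_loss(theta, X, y, reg):
    m = X.shape[0]
    residual = X.dot(theta) - y                       # shape (m,)
    loss = residual.dot(residual) / (2 * m) \
        + reg * theta[1:].dot(theta[1:]) / (2 * m)    # L2 penalty, bias skipped
    grad = X.T.dot(residual) / m                      # gradient of squared loss
    grad[1:] += reg * theta[1:] / m                   # gradient of the penalty
    return loss, grad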

# append a column of ones to matrix X

XX = np.vstack([np.ones((X.shape[0],)),X]).T

#  Train linear regression with lambda = 0

reglinear_reg1 = RegularizedLinearReg_SquaredLoss()
theta_opt0 = reglinear_reg1.train(XX,y,reg=0.0,num_iters=1000)
print('Theta at lambda = 0 is', theta_opt0)

# plot fit over data and save it in fig7.pdf

plt.plot(X,np.dot(XX,theta_opt0),'g-',linewidth=3)
plt.savefig('fig7.pdf')

#######################################################################
# =========== Part 3: Learning Curve for Linear Regression ===========#
#######################################################################

reg = 1.0
XXval = np.vstack([np.ones((Xval.shape[0],)),Xval]).T
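
# The excerpt stops here; a plausible continuation, assuming the learning-curve
# helpers above live in utils.py (plot_utils.plot_learning_curve is a
# hypothetical helper, named by analogy with plot_utils.plot_data used above):

error_train, error_val = utils.learning_curve(XX, y, XXval, yval, reg)
plot_utils.plot_learning_curve(error_train, error_val, reg)  # hypothetical
plt.savefig('fig8.pdf')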
Example #27
                                                            random_state=10)
X_train, X_val, y_train, y_val = train_test_split(X_train_val,
                                                  y_train_val,
                                                  test_size=0.3,
                                                  random_state=10)

X_train_norm, mu, sigma = utils.feature_normalize(X_train)
X_test_norm = (X_test - mu) / sigma
X_val_norm = (X_val - mu) / sigma
XX_train_norm = np.vstack([np.ones((X_train_norm.shape[0], )),
                           X_train_norm.T]).T
XX_test_norm = np.vstack([np.ones((X_test_norm.shape[0], )), X_test_norm.T]).T
XX_val_norm = np.vstack([np.ones((X_val_norm.shape[0], )), X_val_norm.T]).T

# lambda = 0
reglinear_reg1 = RegularizedLinearReg_SquaredLoss()
theta_opt0 = reglinear_reg1.train(XX_train_norm,
                                  y_train,
                                  reg=0.0,
                                  num_iters=1000)
print('Theta at lambda = 0 is', theta_opt0)
print('Test error of the best linear model with lambda = 0 is: ' + str(
    reglinear_reg1.loss(theta_opt0, XX_test_norm, y_test, 0)))

# linear
reg_vec, error_train, error_val = utils.validation_curve(
    XX_train_norm, y_train, XX_val_norm, y_val)
plot_utils.plot_lambda_selection(reg_vec, error_train, error_val)
plt.savefig('linear.png')
plt.show()