Example #1
File: hw1.py Project: yzcmf/CS446-HW1
def plot_linear():
    X, Y = utils.load_reg_data()
    w = linear_normal(X, Y)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in newer Matplotlib
    # the (X + Y) / (X + Y) factor only broadcasts w to an array shaped like the inputs
    ax.plot_surface(X=X, Y=Y, Z=(X + Y) * w / (X + Y))
    plt.xlabel('x_data')
    plt.ylabel('y_data')
    # return plot
    return plt
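Every example on this page calls linear_normal, which the listing does not show. As a point of reference, a minimal closed-form least-squares solver that is consistent with how these snippets use it (NumPy arrays in, weight vector out with the bias coefficient first) might look like the sketch below; the actual signature and conventions in the original hw1.py are not visible here, so treat this as an assumption.

import numpy as np

def linear_normal(X, Y):
    # Hypothetical sketch, not the original hw1.py implementation:
    # prepend a bias column and solve the least-squares problem, so that
    # w[0] is the intercept, matching how the snippets index w.
    Z = np.hstack((np.ones((X.shape[0], 1)), X))
    w, *_ = np.linalg.lstsq(Z, Y, rcond=None)
    return w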
Example #2
def plot_linear():
    # return plot
    X, Y = utils.load_reg_data()
    w = linear_normal(X, Y)
    x_min, x_max = X.min(), X.max()
    x = np.linspace(x_min, x_max, num=10000).reshape((-1, 1))  # column vector so a bias column can be inserted
    x = np.insert(x, 0, 1, 1)  # prepend the bias column
    fig = plt.figure()
    plt.plot(x[:, 1], x @ w)  # x[:1] would slice the first row; x[:, 1] is the feature column
    plt.scatter(X, Y, c='r')
    return fig
Example #3
def plot_poly():
    # return plot
    X, Y = utils.load_reg_data()
    w = poly_normal(X, Y)

    # plot the raw data together with the fitted quadratic w[0] + w[1]*x + w[2]*x^2
    plt.plot(X, Y, X, (w[2] * X**2 + w[1] * X + w[0]))

    plt.title('Polynomial Regression')
    plt.xlabel('X')
    plt.ylabel('Y')

    plt.show()
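poly_normal is likewise not shown in the listing. Given that Examples #3, #7, and #8 treat the fit as w[0] + w[1]*x + w[2]*x^2, a degree-2 expansion that reuses the least-squares solver sketched after Example #1 is one plausible shape for it; this is again only an assumption about the original hw1.py.

import numpy as np

def poly_normal(X, Y):
    # Hypothetical sketch: degree-2 polynomial regression for 1-D inputs.
    # Expand the features to [x, x^2]; the bias column is added inside
    # linear_normal, so the returned weights are [bias, linear, quadratic].
    X = np.asarray(X).reshape(-1, 1)
    Z = np.hstack((X, X ** 2))
    return linear_normal(Z, Y)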
Example #4
def plot_linear():
    '''
        Returns:
            Figure: the figure plotted with matplotlib
    '''
    X, Y = utils.load_reg_data()
    plt.scatter(X, Y, s=None, color='black')
    y = linear_normal(X, Y)
    Z = X
    n_by_d = Z.shape
    Z_new = torch.ones(n_by_d[0], 1)
    Z = torch.cat((Z_new, Z), 1)  # prepend a bias column
    plt.plot(X, torch.matmul(Z, y), 'g')  # y here holds the fitted weights, bias first
    myplot = plt.gcf()
    myplot.savefig('3c.png')
    return myplot  # the docstring promises a Figure
Example #5
def nn_iris():
    X, y = utils.load_reg_data()
    n = X.shape[0]

    # hold out the first 30% of the samples for testing, train on the rest
    X_test = X[:int(n * 0.3), :]
    y_test = y[:int(n * 0.3)]
    X_train = X[int(n * 0.3):, :]  # the original "+ 1" silently dropped one sample
    y_train = y[int(n * 0.3):]

    y_hat = nn(X_train, y_train, X_test)

    print(y_hat)
    print(y_test)

    # error rate: fraction of test points whose prediction differs from the true label
    error = np.count_nonzero(np.abs(y_hat - y_test)) / y_test.shape[0]

    return error
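The nn helper used in this example is not part of the excerpt. A minimal 1-nearest-neighbour predictor with the same call signature could look like the sketch below; whether the original hw1.py uses k > 1 or a different distance is not visible here, so this is only an assumption.

import numpy as np

def nn(X_train, y_train, X_test):
    # Hypothetical sketch: 1-nearest-neighbour classification.
    # For each test point, return the label of the closest training
    # point under Euclidean distance.
    dists = np.linalg.norm(X_test[:, None, :] - X_train[None, :, :], axis=2)
    return y_train[np.argmin(dists, axis=1)]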
Example #6
def plot_linear():
    X, y = utils.load_reg_data()

    w_numpy = linear_normal(X, y)

    # append a bias column and convert everything to float tensors
    X = np.hstack((X, np.ones((X.shape[0], 1), dtype=X.dtype)))
    X = torch.tensor(X, requires_grad=True).type(torch.FloatTensor)
    y = torch.tensor(y, requires_grad=True).type(torch.FloatTensor)
    # return plot
    X_numpy = X.detach().numpy()[:, 0]
    y_numpy = y.detach().numpy()

    # utils.contour_plot(min(X_numpy), max(X_numpy), min(y_numpy), max(y_numpy), M, ngrid = 33)

    plt.figure()
    plt.plot(X_numpy, y_numpy)  # raw data
    plt.plot(X_numpy, X_numpy * w_numpy[1] + w_numpy[0])  # fitted line
    plt.title('Linear Normal Regression')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
Example #7
def plot_poly():
    '''
    Returns:
        Figure: the figure plotted with matplotlib
    '''
    X, Y = utils.load_reg_data()
    plt.scatter(X, Y, s=None, color='black')
    y = poly_normal(X, Y)
    Z = X
    n_by_d = Z.shape
    Z_new = torch.ones(n_by_d[0], 1)
    Z = torch.cat((Z, Z * Z), 1)  # append squared features
    # append pairwise cross terms x_j * x_k for j < k
    for j in range(0, n_by_d[1] - 1):
        for k in range(j + 1, n_by_d[1]):
            j_col = torch.FloatTensor([[Z[i][j]] for i in range(0, n_by_d[0])])
            k_col = torch.FloatTensor([[Z[i][k]] for i in range(0, n_by_d[0])])
            Z = torch.cat((Z, j_col * k_col), 1)
    Z = torch.cat((Z_new, Z), 1)
    plt.plot(X, torch.matmul(Z, y), 'g')  # y here holds the fitted polynomial weights
    myplot = plt.gcf()
    myplot.savefig('polynomial_regression_fit.png')

    plt.show()
    return myplot  # the docstring promises a Figure
Example #8
def plot_poly():
    # return plot
    X, Y = utils.load_reg_data()
    length = X.shape[1]
    # degree-2 feature expansion: append the product of every pair of feature columns
    X_p = X.copy()
    for i in range(length):
        for j in range(i, length):
            X_p = np.insert(X_p, X_p.shape[1], X_p[:, i] * X_p[:, j], 1)
    
    w = linear_normal(X_p, Y)
    
    x_min, x_max = X.min(), X.max()
    x = np.linspace(x_min, x_max, num=10000).reshape((-1,1))
    print(x.shape)
    # rebuild the same degree-2 expansion on the grid (one feature, so just x and x^2)
    for i in range(1):
        for j in range(i, 1):
            x = np.insert(x, 1, x[:,i]*x[:,j], 1)
    x = np.insert(x, 0, 1, 1)
    fig = plt.figure()
    print(x.shape)
    plt.plot(x[:,1], x @ w)
    plt.scatter(X, Y, c='r')
    return fig
Example #9
File: hw1.py Project: yzcmf/CS446-HW1
    while (counter < num_iter):
        cprev = c
        #          update0 = lrate * partial_cost_theta0(w0,w1,X,Y)
        update1 = lrate * partial_cost_theta1(w0, w1, X, Y)
        w1 -= update1
        theta1s.append(w1)
        c = cost(w0, w1, X, Y)
        costs.append(c)
        counter += 1


    return w1, costs, theta1s
    # w0 -= update0
    # theta0s.append(w0)

# module-level check: run gradient descent and inspect the cost/weight trajectory
X, Y = utils.load_reg_data()
w1, costs, theta1s = linear_gd(X, Y, 0.1, 1000)
print(w1)
plt.scatter(costs, theta1s)
plt.show()
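The loop above relies on partial_cost_theta1, which is not shown in this excerpt. For a mean-squared-error cost of the form built by the cost function below, its derivative with respect to theta1 would look roughly like the following sketch; the scaling used in the original hw1.py may differ.

def partial_cost_theta1(theta0, theta1, x, y):
    # Hypothetical sketch: d/d(theta1) of J = (1 / (2m)) * sum_i (theta1 * x[i] + theta0 - y[i])^2
    m = len(x)
    grad = 0.0
    for i in range(m):
        h = theta1 * x[i] + theta0     # hypothesis for observation i
        grad += (h - y[i]) * x[i]      # chain rule: residual times the input
    return grad / m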


def cost(theta0, theta1, x, y):
    #Initialize cost
    J = 0
    # The number of observations
    m = len(x)
    # Loop through each observation
    for i in range(m):
        # Compute the hypothesis
        h = theta1 * x[i] + theta0