Example 1
import numpy as np
import matplotlib.pyplot as plt


def plot_sample(nonreg=True, k=0):
    # Load the raw data and apply the nonlinear feature transform.
    (X_train_raw, y_train, X_test_raw, y_test) = get_data()
    X_train = transform(X_train_raw)
    X_test = transform(X_test_raw)

    # Fit plain linear regression, or the weight-decay (regularized)
    # variant with regularization constant 10^k.
    if nonreg:
        w = linear_regression(X_train, y_train)
    else:
        w = weight_decay_regression(X_train, y_train, 10.0**k)

    def plot_decision_fn(X):
        # Classify raw points: transform them, then take the sign of the
        # linear score.
        X_trans = transform(X)
        return np.sign(np.dot(X_trans, w))

    # Evaluate the decision function on a grid to trace the boundary.
    (cont_x, cont_y, cont_z) = decision_boundary_2D(-1, 1, 0.0025, -1, 1, 0.0025,
                                                    plot_decision_fn)

    print("E_in :", linear_error(X_train, y_train, w))
    print("E_out:", linear_error(X_test, y_test, w))

    # Scatter the test points colored by label, with the boundary in green.
    x_plot = X_test_raw[:, 0]
    y_plot = X_test_raw[:, 1]
    c = np.where(y_test == 1, 'r', 'b')
    plt.scatter(x_plot, y_plot, c=c)
    plt.contour(cont_x, cont_y, cont_z, [0], colors='g')
    plt.xlim([-1, 1])
    plt.ylim([-1, 1])
    plt.grid()
    plt.show()
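
The linear_regression and weight_decay_regression helpers are defined elsewhere and not shown here. A minimal sketch, assuming they use the standard closed-form solutions (pseudoinverse least squares, and w = (X^T X + lambda*I)^{-1} X^T y for weight decay):

import numpy as np


def linear_regression(X, y):
    # Ordinary least squares via the pseudoinverse: w = X^+ y.
    return np.linalg.pinv(X).dot(y)


def weight_decay_regression(X, y, decay):
    # Weight-decay (ridge-style) solution: w = (X^T X + decay*I)^{-1} X^T y.
    d = X.shape[1]
    return np.linalg.solve(X.T.dot(X) + decay * np.eye(d), X.T.dot(y))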
Example 2
    def fit(self, X, y, mus=None):
        # Use the given RBF centers, or pick them by clustering the data.
        self.mus = mus
        if mus is None:
            self.mus = cluster_centers(X, self.k, self.bounds)
        # Build the RBF feature matrix and, optionally, prepend a bias column.
        matrix = rbf_matrix(self.gamma, X, self.mus)
        if self.bias:
            matrix = np.hstack([np.ones((X.shape[0], 1)), matrix])

        # Solve for the output weights with ordinary linear regression.
        self.weights = linear_regression(matrix, y)
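
The rbf_matrix helper used above is also external. A minimal sketch, assuming it builds one Gaussian feature per center (cluster_centers would typically come from a Lloyd's/k-means step, not shown here):

import numpy as np


def rbf_matrix(gamma, X, mus):
    # One Gaussian feature per center: phi_j(x) = exp(-gamma * ||x - mu_j||^2).
    # Resulting shape: (n_points, n_centers).
    dists = np.linalg.norm(X[:, None, :] - mus[None, :, :], axis=2)
    return np.exp(-gamma * dists**2)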
Example 3
def answers():
    # Exponents of the weight-decay constants to sweep: lambda = 10^k.
    k_list = np.array([-3, -2, -1, 0, 1, 2, 3])

    (X_train_raw, y_train, X_test_raw, y_test) = get_data()
    X_train = transform(X_train_raw)
    X_test = transform(X_test_raw)

    assert X_train.shape[0] == y_train.shape[0]
    assert X_test.shape[0] == y_test.shape[0]

    # Baseline: unregularized linear regression.
    w_nonreg = linear_regression(X_train, y_train)

    E_in_nonreg = linear_error(X_train, y_train, w_nonreg)
    E_out_nonreg = linear_error(X_test, y_test, w_nonreg)

    print("Number of train points:", X_train.shape[0])
    print("Number of test points: ", X_test.shape[0])
    print("\nNon-regularized stats:")
    print("   E_in:   ", E_in_nonreg)
    print("   E_out:  ", E_out_nonreg)
    print("   w^2sum: ", np.power(w_nonreg, 2).sum())

    # Sweep the regularization constant and record in/out-of-sample errors.
    print("Regularized stats:")
    E_in_reg = np.zeros(len(k_list))
    E_out_reg = np.zeros(len(k_list))

    for i, k in enumerate(k_list):
        decay = 10.0**k
        w_reg = weight_decay_regression(X_train, y_train, decay)
        E_in_reg[i] = linear_error(X_train, y_train, w_reg)
        E_out_reg[i] = linear_error(X_test, y_test, w_reg)

        print("   k =", k, "constant =", decay)
        print("      E_in:   ", E_in_reg[i])
        print("      E_out:  ", E_out_reg[i])
        print("      w^2sum: ", np.power(w_reg, 2).sum())
Example 4
def train_and_eval(X_train, y_train, X_val, y_val, X_test, y_test):
    # Fit on the training split, then report validation and test errors.
    w = linear_regression(X_train, y_train)
    E_val = linear_error(X_val, y_val, w)
    E_out = linear_error(X_test, y_test, w)
    return E_val, E_out
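
A hedged usage sketch: train_and_eval supports validation-based model selection by fitting each candidate on the training split and comparing E_val. The candidate_transforms dict and the X_*_raw / y_* splits below are hypothetical placeholders, not names from these listings:

# Hypothetical: pick the feature map with the lowest validation error,
# then report its test error.
results = {}
for name, phi in candidate_transforms.items():  # e.g. {"phi3": phi3, "phi4": phi4}
    E_val, E_out = train_and_eval(phi(X_train_raw), y_train,
                                  phi(X_val_raw), y_val,
                                  phi(X_test_raw), y_test)
    results[name] = (E_val, E_out)

best = min(results, key=lambda name: results[name][0])
print("Best by E_val:", best, results[best])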