コード例 #1
0
ファイル: reg_test.py プロジェクト: dflemin3/CSE_546
import sys
sys.path.append("..")
import DML.regression.ridge_utils as ri
import DML.regression.lasso_utils as lu
import DML.regression.regression_utils as ru
import time

# --- Ridge regression smoke test -------------------------------------
print("Testing ridge regression...")

seed = 1
sparse = False

# Reproducible synthetic dense data: 10000 samples, signal rank 7,
# 10 total features (presumably (n, k, d) — TODO confirm against ru).
w, X, y = ru.generate_norm_data(10000, 7, 10, sparse=sparse, seed=seed)

print(w.shape, X.shape, y.shape)

print("Performing ridge regression...")
ridge_fit = ri.fit_ridge(X, y, lam=10.0)
print(ridge_fit)
print(w)

# --- Lasso regression smoke test -------------------------------------
print("Testing lasso regression...")

# Fake sparse data set: n samples, d features, k of them informative.
n, d, k = 10000, 75, 5
lam = 500.0
sparse = True
seed = 1
w, X, y = ru.generate_norm_data(n, k, d, sigma=1, sparse=sparse, seed=seed)

# Largest lambda worth including in a regularization path.
print("Lambda_max:", lu.compute_max_lambda(X, y))
コード例 #2
0
                lammax=lammax,
                scale=scale,
                num=num,
                error_func=val.loss_01,
                thresh=thresh_arr[i],
                **kwargs)

        # Find minimum threshold, lambda from minimum validation error
        ind_t, ind_l = np.unravel_index(err_val.argmin(), err_val.shape)
        best_lambda = lams[ind_l]
        best_thresh = thresh_arr[ind_t]
        print("Best lambda:", best_lambda)
        print("Best threshold:", best_thresh)

# Fit one-vs-all ridge coefficients with the lambda selected above.
w0, w = ri.fit_ridge(X_train, y_train_true, lam=best_lambda)

print(w.shape, w0.shape)

# Predict labels for both splits by taking whichever per-class linear
# score is largest (one-vs-all classification).
y_hat_train = cu.multi_linear_classifier(X_train, w, w0)
y_hat_test = cu.multi_linear_classifier(X_test, w, w0)

# 0/1 misclassification loss on integer labels.
train_01 = val.loss_01(y_train, y_hat_train)
test_01 = val.loss_01(y_test, y_hat_test)
print("Training 01 Loss:", train_01)
print("Testing 01 Loss:", test_01)

# Square loss against the one-hot label matrices.
train_sq = val.square_loss(y_train_true, y_hat_train)
test_sq = val.square_loss(y_test_true, y_hat_test)
print("Training Square Loss:", train_sq)
print("Testing Square Loss:", test_sq)
コード例 #3
0
ファイル: hw2_1.2.py プロジェクト: dflemin3/CSE_546
    X_train = ru.naive_nn_layer(X_train, k=k, v=v)

    # Load in MNIST data
    print("Loading MNIST Testing data...")
    X_test, y_test = mu.load_mnist(dataset='testing')

    # Transform X
    print("Performing naive neural network layer transformation on testing set...")
    X_test = ru.naive_nn_layer(X_test, k=k, v=v)

# Pick a regularization strength from the transformed training set,
# then fit one-vs-all ridge regression coefficients.
best_lambda = val.estimate_lambda(X_train, scale=1.0)
print("Best lambda: %.3lf." % best_lambda)

# One-hot encode the labels: column j is 1 where the label equals j.
# (Assumes labels are non-negative ints 0..max — TODO confirm.)
n_train_classes = np.arange(max(y_train) + 1)
n_test_classes = np.arange(max(y_test) + 1)
y_train_true = np.asarray(y_train[:, None] == n_train_classes, dtype=int).squeeze()
y_test_true = np.asarray(y_test[:, None] == n_test_classes, dtype=int).squeeze()

print("Fitting with ridge regression...")
w0, w = ri.fit_ridge(X_train, y_train_true, lam=best_lambda)

# Predict by selecting whichever per-class prediction is largest
# (one-vs-all classification) on train and test splits.
y_hat_train = cu.multi_linear_classifier(X_train, w, w0)
y_hat_test = cu.multi_linear_classifier(X_test, w, w0)

# 0/1 misclassification loss.
print("Training 01 Loss:", val.loss_01(y_train, y_hat_train))
print("Testing 01 Loss:", val.loss_01(y_test, y_hat_test))

# Square loss against the one-hot labels.
print("Training Square Loss:", val.square_loss(y_train_true, y_hat_train))
print("Testing Square Loss:", val.square_loss(y_test_true, y_hat_test))
コード例 #4
0
ファイル: reg_test.py プロジェクト: dflemin3/CSE_546
import sys
sys.path.append("..")
import DML.regression.ridge_utils as ri
import DML.regression.lasso_utils as lu
import DML.regression.regression_utils as ru
import time

# Exercise ridge regression on synthetic dense data.
print("Testing ridge regression...")
seed, sparse = 1, False

# 10000 samples, signal rank 7, 10 features, fixed seed for repeatability.
w, X, y = ru.generate_norm_data(10000, 7, 10, sparse=sparse, seed=seed)
print(w.shape, X.shape, y.shape)

print("Performing ridge regression...")
print(ri.fit_ridge(X, y, lam=10.0))
print(w)

# Exercise lasso utilities on synthetic sparse data.
print("Testing lasso regression...")

# Fake data dimensions and regularization strength.
n = 10000
d = 75
k = 5
lam = 500.0
sparse = True
seed = 1
w, X, y = ru.generate_norm_data(n, k, d, sigma=1, sparse=sparse, seed=seed)

# Upper end of the lambda range for a regularization path.
lam_max = lu.compute_max_lambda(X, y)
print("Lambda_max:", lam_max)