def reg_logistic_implementation(y, x, degrees, ratio, seed, max_iters, gamma):
    """Train and evaluate regularized logistic regression for each degree.

    For every polynomial degree, fits regularized logistic regression on a
    train split and evaluates it on a test split, recording both losses.

    Args:
        y: target vector.
        x: raw feature matrix.
        degrees: iterable of polynomial expansion degrees to try.
        ratio: fraction of the data used for training in the split.
        seed: RNG seed passed to the splitter.
        max_iters: gradient-descent iteration budget.
        gamma: gradient-descent step size.

    Returns:
        (losses_tr, losses_te): lists of training and test losses, one entry
        per degree (previously the function discarded these and returned None).
    """
    from helpers import build_poly, split_data

    # Split the data based on the input ratio into training and testing data
    x_tr, y_tr, x_te, y_te = split_data(x, y, ratio, seed)

    losses_tr = []
    losses_te = []

    for degree in degrees:
        print('degree = ', degree)

        # Build a training polynomial basis based on the choice of degree
        tx_tr = build_poly(x_tr, degree)

        # Initialize starting point of the gradient descent
        initial_w = np.zeros(tx_tr.shape[1])

        # Perform iteration - calculate w(t+1) and calculate the new loss
        w, loss_tr = reg_logistic_regression(y_tr, tx_tr, initial_w, max_iters, gamma)

        # FIX: np.append(losses_tr, loss_tr) returned a new array that was
        # immediately discarded, so no loss was ever recorded; append to the
        # Python list in place instead.
        losses_tr.append(loss_tr)

        # Build a testing polynomial basis based on the choice of degree
        tx_te = build_poly(x_te, degree)

        # Test the validity of the predictions with the help of the test data
        correct_percentage, loss_te = reg_logistic_test(y_te, tx_te, w, degree)

        losses_te.append(loss_te)

    # Backward-compatible improvement: callers that ignored the old None
    # return still work; the recorded losses are now available.
    return losses_tr, losses_te
def cross_validation_rr(y, x, k_indices, k, lambda_, degree):
    """train and test ridge regression model using cross validation"""
    # Rows of fold k form the held-out test split; everything else trains.
    fold = k_indices[k]
    x_test, y_test = x[fold], y[fold]
    x_train = np.delete(x, [fold], axis=0)
    y_train = np.delete(y, [fold], axis=0)

    # Expand both splits into a polynomial basis of the requested degree.
    phi_train = helpers.build_poly(x_train, degree)
    phi_test = helpers.build_poly(x_test, degree)

    # Fit ridge regression on the training split, then score the test split.
    w, loss_tr = imp.ridge_regression(y_train, phi_train, lambda_)
    loss_te = imp.compute_mse(y_test, phi_test, w)

    return loss_tr, loss_te
# Esempio n. 3 (scraper artifact — non-code separator, commented out)
def cross_validation_ridge_regression(y, x, k_indices, k, lambdas, degrees):
    """
    Completes k-fold cross-validation using the ridge regression method.
    Here, we build polynomial features and create four subsets using
    the jet feature.
    """
    # Fold k is the test split; the remaining folds, flattened, form training.
    test_rows = k_indices[k]
    train_rows = np.delete(k_indices, (k), axis=0).ravel()

    x_tr, y_tr = x[train_rows, :], y[train_rows]
    x_te, y_te = x[test_rows, :], y[test_rows]

    # One boolean mask per jet-number subset, for train and test separately.
    jet_masks_tr = get_jet_masks(x_tr)
    jet_masks_te = get_jet_masks(x_te)

    # Predictions are assembled subset by subset into these vectors.
    pred_tr = np.zeros(len(y_tr))
    pred_te = np.zeros(len(y_te))

    for jet in range(len(jet_masks_tr)):
        m_tr, m_te = jet_masks_tr[jet], jet_masks_te[jet]
        sub_x_tr = x_tr[m_tr]
        sub_x_te = x_te[m_te]
        sub_y_tr = y_tr[m_tr]

        # Pre-process this subset (test statistics derived from train).
        sub_x_tr, sub_x_te = process_data(sub_x_tr, sub_x_te, False)

        # Per-subset polynomial expansion plus an explicit bias column.
        phi_tr = add_constant_column(build_poly(sub_x_tr, degrees[jet]))
        phi_te = add_constant_column(build_poly(sub_x_te, degrees[jet]))

        # Fit ridge regression with this subset's own hyperparameters.
        weights, _ = ridge_regression(y=sub_y_tr, tx=phi_tr, lambda_=lambdas[jet])

        pred_tr[m_tr] = predict_labels(weights, phi_tr)
        pred_te[m_te] = predict_labels(weights, phi_te)

    # Accuracy of the assembled predictions on each split.
    acc_train = compute_accuracy(pred_tr, y_tr)
    acc_test = compute_accuracy(pred_te, y_te)

    return acc_train, acc_test
def least_squares_demo(y, x, k):
    """return error for least square model"""
    seed = 1
    fold_weights = []
    fold_mse = []

    # Degree-1 polynomial expansion of the raw features.
    tx = helpers.build_poly(x, 1)

    # Initialization (kept for parity with the other demos; unused below).
    w_initial = np.zeros(tx.shape[1])

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k, seed)

    # Run least-squares cross-validation on every fold.
    for fold in range(k):
        mse_te, opt_w = cross_validation_ls(y, tx, k_indices, fold)
        fold_mse.append(mse_te)
        fold_weights.append([opt_w])

    # Keep the weights from the fold with the smallest test MSE.
    mse = np.min(fold_mse)
    opt_w = fold_weights[np.argmin(fold_mse)]
    y_model = helpers.predict_labels(np.array(opt_w).T, tx)

    # Computing accuracy
    print("   mse={mse}".format(mse=mse))
    accuracy = (list(y_model.flatten() == y).count(True)) / len(y_model)
    print("   accuracy={acc:.3f}".format(acc=accuracy))
# Esempio n. 5 (scraper artifact — non-code separator, commented out)
def cross_validation(y, x, k_indices, k, lambda_, degree):
    """One fold of ridge-regression cross-validation.

    Returns (train misclassification rate, test misclassification rate, w).
    """
    # Rows of fold k are held out; all other folds are flattened for training.
    test_idx = k_indices[k]
    train_idx = k_indices[~(np.arange(k_indices.shape[0]) == k)].reshape(-1)

    y_te, y_tr = y[test_idx], y[train_idx]
    x_te, x_tr = x[test_idx], x[train_idx]

    # Polynomial feature expansion for both splits.
    phi_tr = build_poly(x_tr, degree)
    phi_te = build_poly(x_te, degree)

    # Fit ridge regression on the training split only.
    w, _ = ridge_regression(y_tr, phi_tr, lambda_)

    # Misclassification proportion serves as the reported loss.
    loss_tr = sum(predict_labels(w, phi_tr) != y_tr) / len(y_tr)
    loss_te = sum(predict_labels(w, phi_te) != y_te) / len(y_te)

    return loss_tr, loss_te, w
# Esempio n. 6 (scraper artifact — non-code separator, commented out)
def logistic_implementation(y,
                            x,
                            degrees,
                            ratio,
                            seed,
                            max_iters,
                            gamma,
                            Newton=False):
    """Train logistic regression for each polynomial degree and record losses.

    Args:
        y: target vector.
        x: raw feature matrix.
        degrees: iterable of polynomial expansion degrees to try.
        ratio: fraction of the data used for training in the split.
        seed: RNG seed passed to the splitter.
        max_iters: iteration budget for gradient descent / Newton's method.
        gamma: gradient-descent step size (unused when Newton=True).
        Newton: if True, use Newton's method instead of gradient descent.

    Returns:
        (losses_tr, losses_te): lists of training and test losses, one entry
        per degree (previously the function discarded these and returned None).
    """
    from helpers import build_poly, split_data

    x_tr, y_tr, x_te, y_te = split_data(x, y, ratio, seed)

    losses_tr = []
    losses_te = []

    for degree in degrees:
        print('degree = ', degree)
        tx_tr = build_poly(x_tr, degree)
        initial_w = np.zeros(tx_tr.shape[1])

        # Idiom fix: `not Newton` instead of `Newton == False`.
        if not Newton:
            w, loss_tr = logistic_regression(y_tr, tx_tr, initial_w, max_iters,
                                             gamma)
        else:
            w, loss_tr = logistic_newton(y_tr, tx_tr, initial_w, max_iters)

        # FIX: np.append returned a new array that was immediately discarded,
        # so no loss was ever recorded; append to the list in place instead.
        losses_tr.append(loss_tr)

        tx_te = build_poly(x_te, degree)
        correct_percentage, loss_te = logistic_test(y_te, tx_te, w, degree)

        losses_te.append(loss_te)

    #plt.plot(degrees,losses_tr,'r',degrees,losses_te,'b')

    # Backward-compatible improvement: callers that ignored the old None
    # return still work; the recorded losses are now available.
    return losses_tr, losses_te
def lrr_demo(y, x, k):
    """find best hyperparameters and return error for regularized logistic regression model"""
    # Degree-4 polynomial expansion of the raw features.
    tx = helpers.build_poly(x, 4)

    seed = 1
    max_iters = 50
    lambdas = np.logspace(-4, -3, 1)
    gammas = np.logspace(-4, -3, 1)
    # Cartesian product of step sizes and regularization strengths.
    hyperparams = [(gamma, lambda_) for gamma in gammas for lambda_ in lambdas]

    w_initial = np.zeros(tx.shape[1])

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k, seed)

    result_loss = []
    result_opt_w = []
    for gamma, lambda_ in hyperparams:
        loss_errors = []
        weights = []

        # Cross-validate this (gamma, lambda_) pair over all k folds.
        for fold in range(k):
            loss_te, opt_w = cross_validation_lrr(y, tx, k_indices, fold, lambda_, gamma, max_iters, w_initial)
            loss_errors.append(loss_te)
            weights.append([opt_w])

        # Average test loss and weights across the folds.
        result_loss.append(np.mean(loss_errors))
        result_opt_w.append(np.mean(weights, axis=0))

    del loss_errors
    del weights

    # Report the hyperparameter pair with the smallest mean loss.
    mse = np.min(result_loss)
    hyper_opt = hyperparams[np.argmin(result_loss)]
    print("   gamma={g:.3f}, mse={mse:.3f} lambda{l:.3f}".format(mse=mse, g=hyper_opt[0], l=hyper_opt[1]))

    opt_w = result_opt_w[np.argmin(result_loss)]

    # Training Accuracy
    y_predicted = helpers.predict_labels(opt_w.T, tx)
    accuracy = (list(y_predicted.flatten() == y).count(True)) / len(y)
    print("   accuracy={acc:.3f}".format(acc=accuracy))

    del result_loss
    del result_opt_w
def lr_demo(y, x, k):
    """find best hyperparameters and return error for logistic regression model"""
    max_iters = 100
    gammas = np.logspace(-4, -3, 1)
    seed = 1

    # adding constant term
    tx = helpers.build_poly(x, 1)

    # Initialization
    w_initial = np.zeros(tx.shape[1])

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k, seed)

    gen_opt_w = []
    gen_loss = []

    # Cross-validate each candidate step size over the k folds.
    for gamma in gammas:
        weights = []
        loss_errors = []

        for fold in range(k):
            loss_te, opt_w = cross_validation_lr(y, tx, k_indices, fold, gamma, max_iters, w_initial)
            loss_errors.append(loss_te)
            weights.append([opt_w])

        gen_loss.append(np.mean(loss_errors))
        gen_opt_w.append(np.mean(weights, axis=0))

    del weights
    del loss_errors

    # nan-aware selection of the gamma with the smallest mean loss.
    best = np.nanargmin(gen_loss)
    opt_gamma = gammas[best]
    opt_w = gen_opt_w[best]
    print("   gamma={l:.3f},loss={loss:.3f}".format(loss=np.min(gen_loss), l=opt_gamma))

    # Training Accuracy
    y_predicted = helpers.predict_labels(opt_w.T, tx)
    accuracy = (list(y_predicted.flatten() == y).count(True)) / len(y)
    print("   accuracy={acc:.3f}".format(acc=accuracy))

    del gen_opt_w
    del gen_loss
def LS_SGD_demo(y, x, k):
    """Find the best hyperparameters for the least-squares SGD model.

    Cross-validates every (batch_size, gamma) pair over k folds, then prints
    the best pair's mean MSE and the training accuracy of its averaged weights.

    Args:
        y: target vector.
        x: raw feature matrix.
        k: number of cross-validation folds.
    """
    #Adding constant term
    tx = helpers.build_poly(x, 1)

    seed = 1
    max_iters = 50
    gammas = np.logspace(-3, 0, 10)
    batch_sizes = np.array([1])

    # Initialization
    w_initial = np.zeros(tx.shape[1])

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k, seed)

    temp_mse = []
    temp_opt_w = []

    # Cartesian product of all candidate batch sizes and step sizes.
    hyperparams = [(batch_size, gamma) for batch_size in batch_sizes for gamma in gammas]

    for batch_size, gamma in hyperparams:
        mse_errors = []
        weights = []

        for i in range(k):
            mse_te, opt_w = cross_validation_ls_SGD(y, tx, k_indices, i, gamma, max_iters, w_initial, batch_size)
            mse_errors.append(mse_te)
            weights.append([opt_w])

        temp_mse.append(np.mean(mse_errors))
        temp_opt_w.append(np.mean(weights, axis=0))

    # FIX: use nan-aware reductions consistently. Previously np.min/np.argmin
    # selected the reported loss and hyperparameters while np.nanargmin
    # selected the weights, so a NaN fold loss made the printed pair disagree
    # with the weights actually used (LS_GD_demo already uses the nan-aware
    # variants throughout).
    best = np.nanargmin(temp_mse)
    mse = np.nanmin(temp_mse)
    hyper_opt = hyperparams[best]
    print("   gamma={g:.3f}, batch={b:.2f}, mse={mse:.3f}".format(mse = mse, g = hyper_opt[1], b = hyper_opt[0]))

    opt_w = temp_opt_w[best]

    #Training Accuracy
    y_predicted = helpers.predict_labels(opt_w.T, tx)
    accuracy = (list(y == y_predicted.flatten()).count(True))/len(y)
    print("   accuracy={acc:.3f}".format(acc = accuracy))
# Esempio n. 10 (scraper artifact — non-code separator, commented out)
def LS_GD_demo(y, x, k):
    """find best hyperparameters and return error for least square GD model"""
    seed = 1
    max_iters = 50
    gammas = np.logspace(-3, 0, 10)

    # Degree-1 polynomial expansion of the raw features.
    tx = helpers.build_poly(x, 1)

    # Initialization
    w_initial = np.zeros(tx.shape[1])

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k, seed)

    gen_opt_w = []
    gen_mse = []

    # Cross-validate each candidate step size over the k folds.
    for gamma in gammas:
        weights = []
        mse_errors = []
        for fold in range(k):
            mse_te, opt_w = cross_validation_ls_GD(y, tx, k_indices, fold, gamma, max_iters, w_initial)
            mse_errors.append(mse_te)
            weights.append([opt_w])

        gen_mse.append(np.mean(mse_errors))
        gen_opt_w.append(np.mean(weights, axis=0))

    del weights
    del mse_errors

    # nan-aware selection of the best gamma and its averaged weights.
    best = np.nanargmin(gen_mse)
    opt_gamma = gammas[best]
    opt_w = gen_opt_w[best]
    mse_LS_GD = np.nanmin(gen_mse)

    print("   gamma={l:.3f}, mse={mse:.3f}".format(mse=mse_LS_GD, l=opt_gamma))

    # Training Accuracy
    y_predicted = helpers.predict_labels(opt_w.T, tx)
    accuracy = (list(y == y_predicted.flatten()).count(True)) / len(y)
    print("   accuracy={acc:.3f}".format(acc=accuracy))
# Esempio n. 11 (scraper artifact — non-code separator, commented out)
def ridge_regression_demo(y, x, degree, k_fold):
    """Find the best lambda for ridge regression via k-fold cross-validation.

    Sweeps a log-spaced grid of lambdas, averages train/test errors over the
    folds, plots the RMSE curves, refits on all data with the best lambda,
    and prints its MSE and training accuracy.

    Args:
        y: target vector.
        x: raw feature matrix.
        degree: polynomial expansion degree.
        k_fold: number of cross-validation folds.
    """
    seed = 1
    lambdas = np.logspace(-1.1, -0.8, 20)

    # split data in k fold
    k_indices = helpers.build_k_indices(y, k_fold, seed)

    # define lists to store the loss of training data and test data
    rmse_tr = []
    rmse_te = []

    # iterate over all the lambdas, compute model parameters, store the rmse
    # (idiom fix: iterate lambdas directly instead of range(len(lambdas)))
    for lambda_ in lambdas:
        avg_err_tr = 0
        avg_err_te = 0
        for fold in range(k_fold):
            err_tr, err_te = cross_validation_rr(y, x, k_indices, fold, lambda_, degree)
            avg_err_tr += err_tr
            avg_err_te += err_te
        rmse_tr.append(np.sqrt(2 * avg_err_tr / k_fold))
        rmse_te.append(np.sqrt(2 * avg_err_te / k_fold))
    helpers.visualization(lambdas, rmse_tr, rmse_te)

    # find the best lambda (idiom fix: np.argmin replaces the manual
    # minimum-index search loop)
    lambda_opt = lambdas[np.argmin(rmse_te)]

    # refit on the full data set with the selected lambda
    x_poly = helpers.build_poly(x, degree)
    w_opt, mse = imp.ridge_regression(y, x_poly, lambda_opt)

    print("   lambda={l:.3f}, mse={mse:.3f}".format(mse = mse, l = lambda_opt))

    #Training Accuracy
    y_predicted = helpers.predict_labels(w_opt.T, x_poly)
    accuracy = (list(y == y_predicted.flatten()).count(True))/len(y)
    print("   accuracy={acc:.3f}".format(acc = accuracy))
# Esempio n. 12 (scraper artifact — non-code separator, commented out)
    ws = []

    for k in range(k_fold):

        loss_tr, loss_te, w = cross_validation(y, x, k_indices, k, lambda_,
                                               degree)
        losses_te.append(loss_te)
        losses_tr.append(loss_tr)
        ws.append(w)

    return np.mean(losses_te, axis=0), np.mean(losses_tr,
                                               axis=0), np.mean(ws, axis=0)


# Run the k-fold ridge-regression cross-validation defined above and report
# the average misclassification proportion on train and test folds.
losses_te, losses_tr, w = cross_validation_ridge()

print(
    f'Average Missclassification proportion on test folds was {losses_te}. On Train folds it was {losses_tr}.'
)

# Load the held-out test set from disk.
# NOTE(review): DATA_PATH is defined elsewhere in the file — confirm in scope.
test_y, test_x, test_ids = load_csv_data(DATA_PATH + 'test.csv')

# replace missing values with means determined from training data
# NOTE(review): col_mean/xmin/xmax presumably come from normalizing the
# training set earlier in the file — confirm they are in scope here.
test_x, _, _, _ = normalize(test_x, col_mean, xmin, xmax)

# create final predictions on testing data and submission csv
y_pred = predict_labels(w, build_poly(test_x, degree))
create_csv_submission(test_ids, y_pred, DATA_PATH + 'inferred.csv')
print(
    'Your final submission has been created and is called /data/inferred.csv')