Example No. 1
import numpy as np
import matplotlib.pyplot as plt
from scipy import io

# valsplit, ridge, hinge, logistic, grdescent, linearmodel and
# area_under_roc_curve are assumed to come from the accompanying course code
# (they are not shown on this page).


def vis_rocs():
    data = io.loadmat(
        'D:/WashU/2020SPR/CSE517 Machine Learning/data/data_train_default.mat')
    X = data['X']
    Y = data['Y']
    xTr, xTv, yTr, yTv = valsplit(X, Y)

    MAXITER = 100
    STEPSIZE = 1e-01

    # Ridge regression (squared loss)
    d, n = xTr.shape
    f = lambda w: ridge(w, xTr, yTr, 0.1)
    ws = grdescent(f, np.zeros((d, 1)), STEPSIZE, MAXITER)

    preds = linearmodel(ws, xTv)
    fpr, tpr, sqauc = area_under_roc_curve(yTv, preds)

    plt.plot(fpr, tpr, color="blue", linewidth=2.0, label="ridge")

    # Hinge loss
    f = lambda w: hinge(w, xTr, yTr, 0.1)
    wh = grdescent(f, np.zeros((d, 1)), STEPSIZE, MAXITER)
    preds = linearmodel(wh, xTv)
    fpr, tpr, hinauc = area_under_roc_curve(yTv, preds)

    plt.plot(fpr, tpr, color="green", linewidth=2.0, label="hinge")

    # Logistic regression
    f = lambda w: logistic(w, xTr, yTr)
    wl = grdescent(f, np.zeros((d, 1)), STEPSIZE, MAXITER)
    preds = linearmodel(wl, xTv)
    fpr, tpr, logauc = area_under_roc_curve(yTv, preds)

    plt.plot(fpr, tpr, color="red", linewidth=2.0, label="logistic")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc='upper right')

    print("Hinge loss: Area under the curve: %.2f" % hinauc)
    print("Logistic loss: Area under the curve: %.2f" % logauc)
    print("Squared loss: Area under the curve: %.2f" % sqauc)
    plt.show()
    return
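
Every example on this page passes its objective into grdescent, which is not listed here. The sketch below is only an assumption about its interface, not the project's actual implementation: it assumes the objective returns a (loss, gradient) pair and that a fixed step size is applied for at most maxiter iterations.

import numpy as np

def grdescent(func, w0, stepsize, maxiter, tolerance=1e-02):
    # Plain fixed-step gradient descent; stops early once the gradient is small.
    w = w0
    for _ in range(maxiter):
        loss, gradient = func(w)
        if np.linalg.norm(gradient) < tolerance:
            break
        w = w - stepsize * gradient
    return w
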
Example No. 2
def trainspamfilter(xTr, yTr):
    # INPUT:
    # xTr, yTr
    #
    # OUTPUT: w_trained
    #
    # Consider optimizing the input parameters for your loss and GD!

    f = lambda w: hinge(w, xTr, yTr, 0.001)
    # f = lambda w : logistic(w, xTr, yTr, 0.0001)
    # f = lambda w : ridge(w, xTr, yTr, 0.09)
    w_trained = grdescent(f, np.zeros((xTr.shape[0], 1)), 1e-04, 1000)
    io.savemat('w_trained.mat', mdict={'w': w_trained})
    return w_trained
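
Examples No. 1 through No. 3 minimize a hinge objective. A minimal sketch of such an objective, assuming xTr is d x n, yTr holds labels in {-1, +1} with shape 1 x n, the last argument is the L2 regularization strength, and the function returns a (loss, gradient) pair (all assumptions, since the real hinge is not shown here):

import numpy as np

def hinge(w, xTr, yTr, lambdaa):
    # Regularized hinge loss: sum(max(0, 1 - y * w^T x)) + lambdaa * ||w||^2
    margins = yTr * (w.T @ xTr)               # shape (1, n)
    active = (margins < 1).astype(float)      # points that violate the margin
    loss = np.sum(np.maximum(0, 1 - margins)) + lambdaa * (w.T @ w).item()
    gradient = -xTr @ (active * yTr).T + 2 * lambdaa * w
    return loss, gradient
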
Example No. 3
def trainspamfilter(xTr, yTr):

    #
    # INPUT:
    # xTr
    # yTr
    #
    # OUTPUT: w_trained
    #
    # Feel free to change this code any way you want

    f = lambda w: hinge(w, xTr, yTr, .1)
    w_trained = grdescent(f, np.zeros((xTr.shape[0], 1)), 1e-06, 2000)
    io.savemat('w_trained.mat', mdict={'w': w_trained})
    return w_trained
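
The logistic objective used in Example No. 1 follows the same (loss, gradient) convention. A sketch under the same shape and label assumptions as the hinge sketch above:

import numpy as np

def logistic(w, xTr, yTr):
    # Logistic loss: sum(log(1 + exp(-y * w^T x))), labels in {-1, +1}
    margins = yTr * (w.T @ xTr)               # shape (1, n)
    loss = np.sum(np.log(1 + np.exp(-margins)))
    gradient = -xTr @ (yTr / (1 + np.exp(margins))).T
    return loss, gradient
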
Example No. 4
def trainspamfilter(xTr, yTr):

    #
    # INPUT:
    # xTr
    # yTr
    #
    # OUTPUT: w_trained
    #
    # Consider optimizing the input parameters for your loss and GD!

    ridge_params = [1e-02, 1000]  # [lambda, maxiter]; the same values appear literally below

    f = lambda w: ridge(w, xTr, yTr, .01)
    w_trained = grdescent(f, np.zeros((xTr.shape[0], 1)), 0.001, 1000)
    io.savemat('w_trained.mat', mdict={'w': w_trained})
    return w_trained
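
Example No. 4 (and the "Squared loss" branch of Example No. 1) minimizes a ridge objective. A sketch, again assuming d x n data, 1 x n targets, and the (loss, gradient) return convention:

import numpy as np

def ridge(w, xTr, yTr, lambdaa):
    # Squared loss with L2 regularization: ||w^T X - y||^2 + lambdaa * ||w||^2
    residual = w.T @ xTr - yTr                # shape (1, n)
    loss = np.sum(residual ** 2) + lambdaa * (w.T @ w).item()
    gradient = 2 * xTr @ residual.T + 2 * lambdaa * w
    return loss, gradient
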
Example No. 5
def trainspamfilter(xTr, yTr):

    #
    # INPUT:
    # xTr
    # yTr
    #
    # OUTPUT: w_trained
    #
    # Feel free to change this code any way you want

    # f = lambda w : hinge(w,xTr,yTr,.01)
    f = lambda w: ridge(w, xTr, yTr, 1)
    w_trained = grdescent(f, np.zeros((xTr.shape[0], 1)), 7e-6, 1000)
    # w_trained = grdescent(f, np.random.normal(0,1,(xTr.shape[0],1)), 7e-6, 2000)
    io.savemat('w_trained.mat', mdict={'w': w_trained})
    return w_trained
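
All five variants of trainspamfilter save the learned weights to w_trained.mat. A hypothetical way to call one of them and check the training error; the bare data file name, the ±1 label convention, and the sign-threshold evaluation are assumptions, not part of the original snippets:

import numpy as np
from scipy import io

data = io.loadmat('data_train_default.mat')   # assumed d x n features and 1 x n labels in {-1, +1}
xTr, yTr = data['X'], data['Y']
w = trainspamfilter(xTr, yTr)                 # also writes w_trained.mat as a side effect
preds = w.T @ xTr                             # linearmodel(w, xTr) is assumed to reduce to this
print('training error: %.4f' % np.mean(np.sign(preds) != yTr))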