# Assumed imports (not shown on the original page): numpy, a largest-eigenvalue
# helper whose keyword arguments match scipy.linalg.eigh, and the project-local
# modules lr (losses / regularizers / prox operators), lib (solvers), and
# proj (projections). The exact import paths are guesses.
import numpy as np
from scipy.linalg import eigh as max_eigval

import lib
import lr
import proj


def linear_pst_ridge(y, X, lam, max_iter=1000, tol=1e-7, algo="proj_gd"):
    """
    Ridge regression constrained to the nonnegative orthant (positive
    quadrant), solved by projected gradient descent.

    NOTE: this function has not been finished yet.
    """
    n, p = X.shape
    x = np.zeros(p)
    XTX = np.dot(X.T, X)
    XTy = np.dot(X.T, y)
    ss = 1.0 / (max_eigval(
        XTX, eigvals_only=True, eigvals=(p - 1, p - 1), check_finite=False)[0]
                + lam)

    ridge_obj = lambda x: lr.l2_loss_obj(x, y, X) + lam * lr.l2_reg_obj(x)
    ridge_grad = lambda x, XTy, XTX: lr.l2_loss_grad2(
        x, XTy, XTX) + lam * lr.l2_reg_grad(x)

    if algo == "proj_gd":
        x_hat = lib.proj_gd(x,
                            ridge_grad,
                            proj.pst_quadrant,
                            ss,
                            ridge_obj,
                            max_iter,
                            tol,
                            grad={
                                "XTX": XTX,
                                "XTy": XTy
                            },
                            proj={})
    else:
        raise ValueError("unsupported algo: {!r}".format(algo))

    return x_hat
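
# Usage sketch for linear_pst_ridge (hypothetical synthetic data; names such as
# X_demo and x_true are illustrative, and the sketch assumes the lr / lib / proj
# helpers behave as their names suggest). Because proj.pst_quadrant projects onto
# the nonnegative orthant, every fitted coefficient should come back >= 0.
rng = np.random.default_rng(0)
X_demo = rng.standard_normal((100, 5))
x_true = np.array([1.0, 0.5, 0.0, 2.0, 0.0])   # nonnegative ground truth
y_demo = X_demo @ x_true + 0.1 * rng.standard_normal(100)
x_hat_demo = linear_pst_ridge(y_demo, X_demo, lam=0.1)
all_nonnegative = np.all(x_hat_demo >= 0.0)    # expected to be True
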
def linear_svm(y, X, lam, max_iter=1000, tol=1e-7, algo="proj_gd"):
    """
    L2-regularized linear SVM, solved in the dual with box constraints
    [0, 1]^n via projected gradient descent; returns the primal weights.
    """
    n, p = X.shape
    alpha = np.zeros(n)
    XTX = np.dot(X.T, X)
    XDy = X * y[:, np.newaxis]
    ss = lam / max_eigval(
        XTX, eigvals_only=True, eigvals=(p - 1, p - 1), check_finite=False)[0]

    svm_obj = lambda a: lr.svm_dual_l2_obj(a, XDy, lam)

    if algo == "proj_gd":
        alpha_hat = lib.proj_gd(alpha,
                                lr.svm_dual_l2_grad,
                                proj.interval,
                                ss,
                                svm_obj,
                                max_iter,
                                tol,
                                grad={
                                    "XDy": XDy,
                                    "lam": lam
                                },
                                proj={
                                    "a": 0.0,
                                    "b": 1.0
                                })
    else:
        raise ValueError("unsupported algo: {!r}".format(algo))

    # Recover the primal weight vector from the active (support) dual variables.
    E = (alpha_hat > 0.0)
    return np.dot(XDy[E, :].T, alpha_hat[E]) / lam
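
# Usage sketch for linear_svm (hypothetical data with labels in {-1, +1}; names
# are illustrative). The returned vector is the primal weights recovered from the
# dual solution, w = (1/lam) * sum_i alpha_i * y_i * x_i over the support vectors,
# so class predictions are sign(X_new @ w).
rng = np.random.default_rng(1)
X_svm = rng.standard_normal((200, 4))
w_true = np.array([1.0, -2.0, 0.5, 0.0])
y_svm = np.sign(X_svm @ w_true)
w_hat = linear_svm(y_svm, X_svm, lam=1.0)
train_acc = np.mean(np.sign(X_svm @ w_hat) == y_svm)   # should be close to 1.0
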
def linear_ridge(y, X, lam, max_iter=1000, tol=1e-7, algo="gd"):
    """
    Ridge regression (l2 loss + l2 penalty) solved by plain, accelerated,
    or proximal gradient descent, with optional restarts.
    """
    n, p = X.shape
    x = np.zeros(p)
    XTX = np.dot(X.T, X)
    XTy = np.dot(X.T, y)
    ss = 1.0 / (max_eigval(XTX, eigvals_only=True, eigvals=(p-1, p-1),
            check_finite=False)[0] + lam)

    ridge_obj = lambda x: lr.l2_loss_obj(x, y, X) + lam * lr.l2_reg_obj(x)
    ridge_grad = lambda x, XTy, XTX: lr.l2_loss_grad2(x, XTy, XTX) + lam * lr.l2_reg_grad(x)
    
    if algo == "gd":
        x_hat = lib.plain_gd(x, ridge_grad,
                ss, ridge_obj, max_iter, tol, XTy=XTy, XTX=XTX )
    elif algo == "acc_gd":
        x_hat = lib.acc_gd(x, ridge_grad,
                ss, ridge_obj, max_iter, tol, XTy=XTy, XTX=XTX )
    elif algo == "prox_gd":
        x_hat = lib.prox_gd(x, lr.l2_loss_grad2, lr.l2_reg_prox,
                ss, lam, ridge_obj, max_iter, tol,
                grad={"XTy" : XTy, "XTX" : XTX}, prox={} )
    elif algo == "acc_prox_gd":
        x_hat = lib.acc_prox_gd(x, lr.l2_loss_grad2, lr.l2_reg_prox,
                ss, lam, ridge_obj, max_iter, tol,
                grad={"XTy" : XTy, "XTX" : XTX}, prox={} )
    elif algo == "acc_gd_restart":
        x_hat = lib.acc_gd_restart(x, ridge_grad,
                ss, ridge_obj, max_iter, tol, XTy=XTy, XTX=XTX )
    elif algo == "acc_prox_gd_restart":
        x_hat = lib.acc_prox_gd_restart(x, lr.l2_loss_grad2, lr.l2_reg_prox,
                ss, lam, ridge_obj, max_iter, tol,
                grad={"XTy" : XTy, "XTX" : XTX}, prox={} )

    return x_hat
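
# Sanity-check sketch for linear_ridge (hypothetical data). Assuming the objective
# is 0.5*||y - X x||^2 + lam * 0.5*||x||^2 -- consistent with the step size
# 1 / (lambda_max(X^T X) + lam) used above -- the iterative solution should agree
# with the closed-form ridge estimate (X^T X + lam * I)^{-1} X^T y.
rng = np.random.default_rng(2)
X_r = rng.standard_normal((50, 8))
y_r = rng.standard_normal(50)
lam_r = 0.5
x_gd = linear_ridge(y_r, X_r, lam_r, algo="acc_gd")
x_closed = np.linalg.solve(X_r.T @ X_r + lam_r * np.eye(8), X_r.T @ y_r)
# np.allclose(x_gd, x_closed, atol=1e-4) is expected to hold at convergence.
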
def linear_slope(y, X, lam, theta, max_iter=1000, tol=1e-7, algo="prox_gd"):
    """
    SLOPE (sorted-l1 penalized) regression solved by proximal gradient descent.

    theta: penalty weight sequence, in decreasing order.
    """
    n, p = X.shape
    x = np.zeros(p)
    XTX = np.dot(X.T, X)
    XTy = np.dot(X.T, y)
    ss = 1.0 / max_eigval(
        XTX, eigvals_only=True, eigvals=(p - 1, p - 1), check_finite=False)[0]

    slope_obj = lambda x: lr.l2_loss_obj(x, y, X) + lam * lr.slope_reg_obj(
        x, theta)

    if algo == "prox_gd":
        x_hat = lib.prox_gd(x,
                            lr.l2_loss_grad2,
                            lr.slope_reg_prox,
                            ss,
                            lam,
                            slope_obj,
                            max_iter,
                            tol,
                            grad={
                                "XTy": XTy,
                                "XTX": XTX
                            },
                            prox={"theta": theta})
    elif algo == "acc_prox_gd":
        x_hat = lib.acc_prox_gd(x,
                                lr.l2_loss_grad2,
                                lr.slope_reg_prox,
                                ss,
                                lam,
                                slope_obj,
                                max_iter,
                                tol,
                                grad={
                                    "XTy": XTy,
                                    "XTX": XTX
                                },
                                prox={"theta": theta})
    elif algo == "acc_prox_gd_restart":
        x_hat = lib.acc_prox_gd_restart(x,
                                        lr.l2_loss_grad2,
                                        lr.slope_reg_prox,
                                        ss,
                                        lam,
                                        slope_obj,
                                        max_iter,
                                        tol,
                                        grad={
                                            "XTy": XTy,
                                            "XTX": XTX
                                        },
                                        prox={"theta": theta})

    return x_hat
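
# Usage sketch for linear_slope (hypothetical data). theta is the decreasing
# weight sequence of the sorted-l1 (SLOPE) penalty; a simple linearly decreasing
# sequence is used here purely for illustration.
rng = np.random.default_rng(3)
X_s = rng.standard_normal((100, 10))
y_s = rng.standard_normal(100)
theta_s = np.linspace(1.0, 0.1, 10)    # decreasing, as the docstring requires
x_slope = linear_slope(y_s, X_s, lam=0.5, theta=theta_s, algo="acc_prox_gd")
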
def linear_lasso(y, X, lam, max_iter=1000, tol=1e-7, algo="prox_gd"):
    """
    Lasso (l2 loss + l1 penalty) solved by (accelerated) proximal gradient
    descent.
    """
    n, p = X.shape
    x = np.zeros(p)
    XTX = np.dot(X.T, X)
    XTy = np.dot(X.T, y)
    ss = 1.0 / max_eigval(XTX, eigvals_only=True, eigvals=(p-1, p-1),
            check_finite=False)[0]

    lasso_obj = lambda x: lr.l2_loss_obj(x, y, X) + lam * lr.l1_reg_obj(x)

    if algo == "prox_gd":
        x_hat = lib.prox_gd(x, lr.l2_loss_grad2, lr.l1_reg_prox,
                ss, lam, lasso_obj, max_iter, tol,
                grad={"XTy" : XTy, "XTX" : XTX}, prox={} )
    elif algo == "acc_prox_gd":
        x_hat = lib.acc_prox_gd(x, lr.l2_loss_grad2, lr.l1_reg_prox,
                ss, lam, lasso_obj, max_iter, tol,
                grad={"XTy" : XTy, "XTX" : XTX}, prox={} )

    return x_hat
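
# Usage sketch for linear_lasso (hypothetical sparse ground truth). With a large
# enough lam, the l1 proximal step should drive most coefficients exactly to zero.
rng = np.random.default_rng(4)
X_l = rng.standard_normal((100, 20))
x_sparse = np.zeros(20)
x_sparse[:3] = [2.0, -1.5, 1.0]
y_l = X_l @ x_sparse + 0.05 * rng.standard_normal(100)
x_lasso = linear_lasso(y_l, X_l, lam=5.0, algo="acc_prox_gd")
n_nonzero = int(np.sum(np.abs(x_lasso) > 1e-8))   # expected to be small (around 3)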