Code example #1
File: lightning.py Project: josephsalmon/benchOpt
# Imports needed by this snippet (paths per current benchopt and
# sklearn-contrib-lightning; older benchopt versions may differ).
from benchopt import BaseSolver
from lightning.regression import CDRegressor

class Solver(BaseSolver):
    name = 'Lightning'

    install_cmd = 'pip'
    requirements = ['sklearn-contrib-lightning']
    requirements_import = ['lightning']
    requirements_install = [
        'git+https://github.com/scikit-learn-contrib/lightning.git'
    ]

    def set_objective(self, X, y, lmbd):
        self.X, self.y, self.lmbd = X, y, lmbd

        self.clf = CDRegressor(loss='squared',
                               penalty='l1',
                               C=1,
                               alpha=self.lmbd,
                               tol=1e-15)

    def run(self, n_iter):
        self.clf.max_iter = n_iter
        self.clf.fit(self.X, self.y)

    def get_result(self):
        return self.clf.coef_.flatten()
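
For context, a minimal standalone sketch (not part of the benchmark) of the estimator this solver wraps; the synthetic data and the alpha and max_iter values are illustrative assumptions only:

import numpy as np
from lightning.regression import CDRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 20)
y = X @ rng.randn(20) + 0.1 * rng.randn(100)

clf = CDRegressor(loss='squared', penalty='l1', C=1, alpha=0.1,
                  max_iter=100, tol=1e-15)
clf.fit(X, y)               # coordinate descent on the l1-penalized problem
beta = clf.coef_.flatten()  # lasso coefficients, as in get_result() above

In the benchmark itself, benchopt drives the set_objective()/run()/get_result() cycle; the sketch only shows what a single fit computes.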
Code example #2
File: lightning.py Project: tanglef/benchmark_lasso
# Imports needed by this snippet.
from benchopt import BaseSolver
from lightning.regression import CDRegressor

class Solver(BaseSolver):
    name = 'Lightning'

    install_cmd = 'conda'
    requirements = [
        'pip:git+https://github.com/scikit-learn-contrib/lightning.git'
    ]
    references = [
        'M. Blondel, K. Seki and K. Uehara, '
        '"Block coordinate descent algorithms for large-scale sparse '
        'multiclass classification" '
        'Mach. Learn., vol. 93, no. 1, pp. 31-52 (2013)'
    ]

    def set_objective(self, X, y, lmbd):
        self.X, self.y, self.lmbd = X, y, lmbd

        self.clf = CDRegressor(loss='squared',
                               penalty='l1',
                               C=1,
                               alpha=self.lmbd,
                               tol=1e-15)

    def run(self, n_iter):
        self.clf.max_iter = n_iter
        self.clf.fit(self.X, self.y)

    def get_result(self):
        return self.clf.coef_.flatten()
Code example #3
File: decompose.py Project: caddyless/learn_pytroch
# Imports needed by this snippet; dcfgs, cfgs, Worker, tls and keras_kernel
# are project-internal and not shown here.
from sklearn.linear_model import Ridge, LinearRegression
from lightning.regression import CDRegressor

def fc_kernel(X,
              Y,
              copy_X=True,
              W=None,
              B=None,
              ret_reg=False,
              fit_intercept=True):
    """
    return: n c
    """
    assert copy_X
    assert len(X.shape) == 2
    if dcfgs.ls == cfgs.solvers.gd:
        w = Worker()

        def wo():
            from .GDsolver import fc_GD
            a, b = fc_GD(X, Y, W, B, n_iters=1)
            return {'a': a, 'b': b}

        outputs = w.do(wo)
        return outputs['a'], outputs['b']
    elif dcfgs.ls == cfgs.solvers.tls:
        return tls(X, Y, debug=True)
    elif dcfgs.ls == cfgs.solvers.keras:
        _reg = keras_kernel()
        _reg.fit(X, Y, W, B)
        return _reg.coef_, _reg.intercept_
    elif dcfgs.ls == cfgs.solvers.lightning:
        #_reg = SGDRegressor(eta0=1e-8, intercept_decay=0, alpha=0, verbose=2)
        _reg = CDRegressor(n_jobs=-1, alpha=0, verbose=2)
        if 0:  # disabled: would warm-start from the original W, B
            _reg.intercept_ = B
            _reg.coef_ = W
    elif dcfgs.fc_ridge > 0:
        _reg = Ridge(alpha=dcfgs.fc_ridge)
    else:
        _reg = LinearRegression(n_jobs=-1,
                                copy_X=copy_X,
                                fit_intercept=fit_intercept)
    _reg.fit(X, Y)
    if ret_reg:
        return _reg
    return _reg.coef_, _reg.intercept_
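
The default branch is ordinary least squares. A hedged sketch of what the returned pair represents, with the 66-in/64-out shapes taken from the comments in example #4 and random data standing in for real activations:

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.random.randn(128, 66)  # n samples, 66 input features
Y = np.random.randn(128, 64)  # n samples, 64 output targets

reg = LinearRegression(n_jobs=-1, fit_intercept=True)
reg.fit(X, Y)
W, b = reg.coef_, reg.intercept_  # W: (64, 66), b: (64,)
Y_hat = X @ W.T + b               # equivalent to reg.predict(X)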
Code example #4
# Imports needed by this snippet; dcfgs, cfgs, Worker, tls and keras_kernel
# are project-internal and not shown here.
from sklearn.linear_model import Ridge, LinearRegression
from lightning.regression import CDRegressor


def fc_kernel(X, Y, copy_X=True, W=None, B=None, ret_reg=False, fit_intercept=True):
    """Fit a linear map from X to Y with the configured solver.

    Returns (coef_, intercept_), or the fitted regressor itself when
    ret_reg is True.
    """
    assert copy_X
    assert len(X.shape) == 2
    if dcfgs.ls == cfgs.solvers.gd:
        w = Worker()

        def wo():
            from .GDsolver import fc_GD
            a, b = fc_GD(X, Y, W, B, n_iters=1)
            return {'a': a, 'b': b}

        outputs = w.do(wo)
        return outputs['a'], outputs['b']
    elif dcfgs.ls == cfgs.solvers.tls:
        return tls(X, Y, debug=True)
    elif dcfgs.ls == cfgs.solvers.keras:
        _reg = keras_kernel()
        _reg.fit(X, Y, W, B)
        return _reg.coef_, _reg.intercept_
    elif dcfgs.ls == cfgs.solvers.lightning:
        # _reg = SGDRegressor(eta0=1e-8, intercept_decay=0, alpha=0, verbose=2)
        _reg = CDRegressor(n_jobs=-1, alpha=0, verbose=2)
        if 0:  # disabled: would warm-start from the original W, B
            _reg.intercept_ = B
            _reg.coef_ = W
    elif dcfgs.fc_ridge > 0:
        _reg = Ridge(alpha=dcfgs.fc_ridge)
    else:
        # redprint("fc_kernel entry here")
        _reg = LinearRegression(n_jobs=-1, copy_X=copy_X, fit_intercept=fit_intercept)
    # redprint("[in fc_kernel], X.shape=%s, Y.shape=%s" % (str(X.shape), str(Y.shape)))
    _reg.fit(X, Y)
    # Use LinearRegression to fit a linear map from x (66-dim) to y (64-dim):
    # coef_ is the coefficient part, a (64, 66) matrix with y = W * x',
    # and intercept_ is the bias.
    # print('Coefficients.shape:', _reg.coef_.shape)
    # print('intercept.shape : ', _reg.intercept_.shape)
    if ret_reg:
        return _reg
    return _reg.coef_, _reg.intercept_
Code example #5
File: lightning.py Project: tomMoral/benchmark_lasso
# Imports needed by this snippet.
import numpy as np

from benchopt import BaseSolver
from lightning.regression import CDRegressor

class Solver(BaseSolver):
    name = 'Lightning'

    install_cmd = 'conda'
    requirements = [
        'cython',
        'pip:git+https://github.com/scikit-learn-contrib/lightning.git'
    ]
    references = [
        'M. Blondel, K. Seki and K. Uehara, '
        '"Block coordinate descent algorithms for large-scale sparse '
        'multiclass classification" '
        'Mach. Learn., vol. 93, no. 1, pp. 31-52 (2013)'
    ]

    def skip(self, X, y, lmbd, fit_intercept):
        if fit_intercept:
            return True, f"{self.name} does not handle fit_intercept"

        return False, None

    def set_objective(self, X, y, lmbd, fit_intercept):
        self.X, self.y, self.lmbd = X, y, lmbd
        self.fit_intercept = fit_intercept

        self.clf = CDRegressor(
            loss='squared', penalty='l1', C=.5, alpha=self.lmbd,
            tol=1e-15
        )

    def run(self, n_iter):
        self.clf.max_iter = n_iter
        self.clf.fit(self.X, self.y)

    def get_result(self):
        beta = self.clf.coef_.flatten()
        if self.fit_intercept:
            beta = np.r_[beta, self.clf.intercept_]
        return beta
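
Two details differ from examples #1 and #2: the skip() hook declines problems that require an intercept, and C=.5 replaces C=1, presumably so that lightning's C-weighted data-fit term matches the benchmark objective 1/2 * ||y - X beta||^2 + lambda * ||beta||_1 exactly rather than up to a factor of two. A hedged sketch of the call sequence a benchopt runner drives (the runner normally instantiates and calls the class itself; the data and lmbd here are made up):

import numpy as np

rng = np.random.RandomState(0)
X, y = rng.randn(50, 10), rng.randn(50)

solver = Solver()
skip, reason = solver.skip(X, y, lmbd=0.1, fit_intercept=False)
if not skip:
    solver.set_objective(X, y, lmbd=0.1, fit_intercept=False)
    solver.run(n_iter=100)      # the runner grows n_iter between calls
    beta = solver.get_result()  # flattened lasso coefficients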