Example #1
    def _posterior(self, X, Y, alpha0, w0):
        '''
        Iteratively refitted least squares method using l_bfgs_b or newton_cg.
        Finds MAP estimates for weights and Hessian at convergence point
        '''
        n_samples, n_features = X.shape
        if self.solver == 'lbfgs_b':
            f = lambda w: _logistic_loss_and_grad(w, X[:, :-1], Y, alpha0)
            w = fmin_l_bfgs_b(f, x0=w0, pgtol=self.tol_solver,
                              maxiter=self.n_iter_solver)[0]
        elif self.solver == 'newton_cg':
            f = _logistic_loss
            grad = lambda w, *args: _logistic_loss_and_grad(w, *args)[1]
            hess = _logistic_grad_hess
            args = (X[:, :-1], Y, alpha0)
            w = newton_cg(hess, f, grad, w0, args=args,
                          maxiter=self.n_iter, tol=self.tol)[0]
        else:
            raise NotImplementedError('Liblinear solver is not yet implemented')

        # calculate negative of Hessian at w
        xw = np.dot(X, w)
        s = expit(xw)
        R = s * (1 - s)
        Hess = np.dot(X.T * R, X)
        Alpha = np.ones(n_features) * alpha0
        if self.fit_intercept:
            Alpha[-1] = np.finfo(np.float16).eps
        np.fill_diagonal(Hess, np.diag(Hess) + Alpha)
        e = eigvalsh(Hess)
        return w, 1. / e
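
The variant above implements a Laplace approximation of the posterior: the MAP weights come from minimising the regularised logistic loss, and the negative Hessian of the log-posterior at that point, X.T * diag(R) * X + alpha * I with R = sigmoid(Xw) * (1 - sigmoid(Xw)), acts as the posterior precision. A minimal standalone sketch of that Hessian step, assuming only NumPy/SciPy (the function and variable names below are illustrative, not taken from the project above):

import numpy as np
from scipy.special import expit
from scipy.linalg import eigvalsh

def laplace_precision(X, w, alpha):
    # negative Hessian of the log-posterior of L2-regularised logistic regression at w
    s = expit(X.dot(w))                    # predicted probabilities
    R = s * (1 - s)                        # per-sample curvature weights
    H = np.dot(X.T * R, X)                 # X^T diag(R) X
    H[np.diag_indices_from(H)] += alpha    # add the Gaussian prior precision
    return H

# toy usage: reciprocals of the eigenvalues are the posterior variances along
# the eigen-directions, matching the (w, 1/e) returned above
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
w = rng.randn(3)
variances = 1.0 / eigvalsh(laplace_precision(X, w, alpha=1.0))
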
Example #2
    def _posterior(self, X, Y, alpha0, w0):
        '''
        Iteratively refitted least squares method using l_bfgs_b or newton_cg.
        Finds MAP estimates for weights and Hessian at convergence point
        '''
        n_samples, n_features = X.shape
        if self.solver == 'lbfgs_b':
            f = lambda w: _logistic_loss_and_grad(w, X[:, :-1], Y, alpha0)
            w = fmin_l_bfgs_b(f, x0=w0, pgtol=self.tol_solver,
                              maxiter=self.n_iter_solver)[0]
        elif self.solver == 'newton_cg':
            f = _logistic_loss
            grad = lambda w, *args: _logistic_loss_and_grad(w, *args)[1]
            hess = _logistic_grad_hess
            args = (X[:, :-1], Y, alpha0)
            w = newton_cg(hess, f, grad, w0, args=args,
                          maxiter=self.n_iter, tol=self.tol)[0]
        else:
            raise NotImplementedError('Liblinear solver is not yet implemented')

        # calculate negative of Hessian at w
        xw = np.dot(X, w)
        s = sigmoid(xw)
        R = s * (1 - s)
        Hess = np.dot(X.T * R, X)
        Alpha = np.ones(n_features) * alpha0
        if self.fit_intercept:
            Alpha[-1] = np.finfo(np.float16).eps
        np.fill_diagonal(Hess, np.diag(Hess) + Alpha)
        e = eigvalsh(Hess)
        return w, 1. / e
Example #3
def test_newton_cg():
    rng = np.random.RandomState(0)
    A = rng.normal(size=(10, 10))
    x0 = np.ones(10)

    def func(x):
        Ax = A.dot(x)
        return .5 * (Ax).dot(Ax)

    def grad(x):
        return A.T.dot(A.dot(x))

    def grad_hess(x):
        return grad(x), lambda x: A.T.dot(A.dot(x))

    with pytest.warns(FutureWarning, match="removed in version 0.24"):
        newton_cg(grad_hess, func, grad, x0)
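
In this test, newton_cg receives a callable that returns the gradient together with a closure computing Hessian-vector products. The same objective can be minimised through SciPy's public Newton-CG interface, which takes separate jac and hessp callbacks; a small sketch assuming only NumPy/SciPy:

import numpy as np
from scipy.optimize import minimize

rng = np.random.RandomState(0)
A = rng.normal(size=(10, 10))
x0 = np.ones(10)

def func(x):
    Ax = A.dot(x)
    return 0.5 * Ax.dot(Ax)

def grad(x):
    return A.T.dot(A.dot(x))

def hessp(x, p):
    # the Hessian of this quadratic objective is the constant matrix A.T A
    return A.T.dot(A.dot(p))

res = minimize(func, x0, method='Newton-CG', jac=grad, hessp=hessp, tol=1e-10)
# res.x is the minimiser, here essentially the zero vector
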
Example #4
    def _posterior(self, X, Y, alpha0, w0, full_covar=False):
        '''
        Iteratively refitted least squares method using l_bfgs_b.
        Finds MAP estimates for weights and Hessian at convergence point
        '''
        if self.solver == 'lbfgs_b':
            f = lambda w: _logistic_loss_and_grad(w, X, Y, alpha0)
            w = fmin_l_bfgs_b(f,
                              x0=w0,
                              pgtol=self.tol_solver,
                              maxiter=self.n_iter_solver)[0]
        elif self.solver == 'newton_cg':
            f = _logistic_loss
            grad = lambda w, *args: _logistic_loss_and_grad(w, *args)[1]
            hess = _logistic_grad_hess
            args = (X, Y, alpha0)
            w = newton_cg(hess,
                          f,
                          grad,
                          w0,
                          args=args,
                          maxiter=self.n_iter,
                          tol=self.tol)[0]
        else:
            raise NotImplementedError(
                'Liblinear solver is not yet implemented')

        # calculate negative of Hessian at w
        if self.fit_intercept:
            XW = np.dot(X, w[:-1]) + w[-1]
        else:
            XW = np.dot(X, w)
        s = expit(XW)
        R = s * (1 - s)
        negHessian = np.dot(X.T * R, X)

        # regularisation added to the diagonal; the intercept (when fitted) is
        # handled separately above and is not part of negHessian
        alpha_vec = np.full(negHessian.shape[0], alpha0)
        np.fill_diagonal(negHessian, np.diag(negHessian) + alpha_vec)
        if full_covar is False:
            eigs = 1. / eigvalsh(negHessian)
            return [w, eigs]
        else:
            inv = pinvh(negHessian)
            return [w, inv]
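
The full_covar flag switches between two summaries of the same negative Hessian: its eigenvalues (whose reciprocals are the posterior variances along the eigen-directions) and its full inverse via pinvh. A small sketch of that relationship on an illustrative symmetric positive-definite matrix, assuming only NumPy/SciPy:

import numpy as np
from scipy.linalg import eigvalsh, pinvh

rng = np.random.RandomState(0)
M = rng.randn(5, 5)
H = M.dot(M.T) + 5 * np.eye(5)      # symmetric positive-definite "precision" matrix

eigs = 1.0 / eigvalsh(H)            # variances along the eigen-directions only
cov = pinvh(H)                      # full covariance (symmetric matrix inverse)

# the eigenvalues of the full covariance match the reciprocal eigenvalues above
np.testing.assert_allclose(np.sort(eigvalsh(cov)), np.sort(eigs))
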
Example #5
def test_newton_cg():
    # Test that newton_cg gives same result as scipy's fmin_ncg

    rng = np.random.RandomState(0)
    A = rng.normal(size=(10, 10))
    x0 = np.ones(10)

    def func(x):
        Ax = A.dot(x)
        return .5 * (Ax).dot(Ax)

    def grad(x):
        return A.T.dot(A.dot(x))

    def hess(x, p):
        return p.dot(A.T.dot(A.dot(x.all())))

    def func_grad_hess(x):
        return func(x), grad(x), lambda x: A.T.dot(A.dot(x))

    assert_array_almost_equal(newton_cg(func_grad_hess, func, grad, x0, tol=1e-10),
                              fmin_ncg(f=func, x0=x0, fprime=grad, fhess_p=hess))
Example #6
    def _posterior(self, X, Y, alpha0, w0, full_covar=False):
        '''
        Iteratively refitted least squares method using l_bfgs_b.
        Finds MAP estimates for weights and Hessian at convergence point
        '''
        if self.solver == 'lbfgs_b':
            f = lambda w: _logistic_loss_and_grad(w, X, Y, alpha0)
            w = fmin_l_bfgs_b(f, x0=w0, pgtol=self.tol_solver,
                              maxiter=self.n_iter_solver)[0]
        elif self.solver == 'newton_cg':
            f = _logistic_loss
            grad = lambda w, *args: _logistic_loss_and_grad(w, *args)[1]
            hess = _logistic_grad_hess
            args = (X, Y, alpha0)
            w = newton_cg(hess, f, grad, w0, args=args,
                          maxiter=self.n_iter, tol=self.tol)[0]
        else:
            raise NotImplementedError('Liblinear solver is not yet implemented')

        # calculate negative of Hessian at w
        if self.fit_intercept:
            XW = np.dot(X, w[:-1]) + w[-1]
        else:
            XW = np.dot(X, w)
        s = expit(XW)
        R = s * (1 - s)
        negHessian = np.dot(X.T * R, X)

        # regularisation added to the diagonal; the intercept (when fitted) is
        # handled separately above and is not part of negHessian
        alpha_vec = np.full(negHessian.shape[0], alpha0)
        np.fill_diagonal(negHessian, np.diag(negHessian) + alpha_vec)
        if full_covar is False:
            eigs = 1. / eigvalsh(negHessian)
            return [w, eigs]
        else:
            inv = pinvh(negHessian)
            return [w, inv]
Example #7
def test_newton_cg():
    # Test that newton_cg gives same result as scipy's fmin_ncg

    rng = np.random.RandomState(0)
    A = rng.normal(size=(10, 10))
    x0 = np.ones(10)

    def func(x):
        Ax = A.dot(x)
        return .5 * (Ax).dot(Ax)

    def grad(x):
        return A.T.dot(A.dot(x))

    def hess(x, p):
        return p.dot(A.T.dot(A.dot(x.all())))

    def func_grad_hess(x):
        return func(x), grad(x), lambda x: A.T.dot(A.dot(x))

    assert_array_almost_equal(
        newton_cg(func_grad_hess, func, grad, x0, tol=1e-10),
        fmin_ncg(f=func, x0=x0, fprime=grad, fhess_p=hess))
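
The hess(x, p) callback above looks odd because of x.all(), but for this quadratic objective the Hessian is the constant matrix A.T A, so the product does not really depend on x: x.all() only contributes a boolean scalar equal to True (i.e. 1) whenever no component of x is exactly zero. A quick check of that equivalence, assuming only NumPy:

import numpy as np

rng = np.random.RandomState(0)
A = rng.normal(size=(10, 10))
x = rng.normal(size=10)     # any point with no exactly-zero component
p = rng.normal(size=10)

lhs = p.dot(A.T.dot(A.dot(x.all())))    # what the test's hess(x, p) computes
rhs = A.T.dot(A).dot(p)                 # the constant Hessian-vector product
np.testing.assert_allclose(lhs, rhs)
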
Example #8
def _logistic_regression_path(X,
                              y,
                              pos_class=None,
                              Cs=10,
                              fit_intercept=True,
                              max_iter=100,
                              tol=1e-4,
                              verbose=0,
                              solver='lbfgs',
                              coef=None,
                              class_weight=None,
                              dual=False,
                              penalty='l2',
                              intercept_scaling=1.,
                              multi_class='warn',
                              random_state=None,
                              check_input=True,
                              max_squared_sum=None,
                              sample_weight=None,
                              l1_ratio=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multi_class='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iterations for each C in Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.

    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        X = check_array(X,
                        accept_sparse='csr',
                        dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    classes = np.unique(y)
    random_state = check_random_state(random_state)

    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
        check_consistent_length(y, sample_weight)
        default_weights = False
    else:
        sample_weight = np.ones(X.shape[0], dtype=X.dtype)
        default_weights = (class_weight is None)

    daal_ready = (use_daal and solver in ['lbfgs', 'newton-cg']
                  and not sparse.issparse(X))
    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        class_weight_ = compute_class_weight(class_weight, classes, y)
        sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=X.dtype)
        y_bin[~mask] = -1.
        # for compute_class_weight

        if class_weight == "balanced":
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]

        daal_ready = daal_ready and (default_weights or np.allclose(
            sample_weight, np.ones_like(sample_weight)))
        if daal_ready:
            w0 = np.zeros(n_features + 1, dtype=X.dtype)
            y_bin[~mask] = 0.
        else:
            w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)

    else:
        daal_ready = daal_ready and (default_weights or np.allclose(
            sample_weight, np.ones_like(sample_weight)))

        if solver not in ['sag', 'saga']:
            if daal_ready:
                Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
            else:
                lbin = LabelBinarizer()
                Y_multi = lbin.fit_transform(y)
                if Y_multi.shape[1] == 1:
                    Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
            le = LabelEncoder()
            Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)

        if daal_ready:
            w0 = np.zeros((classes.size, n_features + 1),
                          order='C',
                          dtype=X.dtype)
        else:
            w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                          order='F',
                          dtype=X.dtype)

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            if daal_ready:
                w0[-coef.size:] = np.roll(
                    coef, 1, -1) if coef.size != n_features else coef
            else:
                w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1

            if (coef.shape[0] != n_classes
                    or coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' %
                    (coef.shape[0], coef.shape[1], classes.size, n_features,
                     classes.size, n_features + 1))

            if daal_ready:
                w0[:, -coef.shape[1]:] = np.roll(
                    coef, 1, -1) if coef.shape[1] != n_features else coef
            else:
                if n_classes == 1:
                    w0[0, :coef.shape[1]] = -coef
                    w0[1, :coef.shape[1]] = coef
                else:
                    w0[:, :coef.shape[1]] = coef

    C_daal_multiplier = 1
    # commented out because this is Py3 feature
    #def _map_to_binary_logistic_regression():
    #    nonlocal C_daal_multiplier
    #    nonlocal w0
    #    C_daal_multiplier = 2
    #    w0 *= 2

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            if daal_ready and classes.size == 2:
                w0_saved = w0
                w0 = w0[-1:, :]
            w0 = w0.ravel()
        target = Y_multi
        if solver == 'lbfgs':
            if daal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_and_grad
            else:
                func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            if daal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                func = lambda x, *args: _multinomial_loss(x, *args)[0]
                grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
                hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            if daal_ready:
                func = _daal4py_loss_and_grad
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
            else:
                func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            if daal_ready:
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                func = _logistic_loss
                grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
                hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}

    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            if daal_ready:
                extra_args = daal_extra_args_func(classes.size,
                                                  w0,
                                                  X,
                                                  target,
                                                  0.,
                                                  0.5 / C / C_daal_multiplier,
                                                  fit_intercept,
                                                  value=True,
                                                  gradient=True,
                                                  hessian=False)
            else:
                extra_args = (X, target, 1. / C, sample_weight)

            iprint = [-1, 50, 1, 100,
                      101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            w0, loss, info = optimize.fmin_l_bfgs_b(func,
                                                    w0,
                                                    fprime=None,
                                                    args=extra_args,
                                                    iprint=iprint,
                                                    pgtol=tol,
                                                    maxiter=max_iter)
            if daal_ready and C_daal_multiplier == 2:
                w0 *= 0.5
            if info["warnflag"] == 1:
                warnings.warn(
                    "lbfgs failed to converge. Increase the number "
                    "of iterations.", ConvergenceWarning)
            # In scipy <= 1.0.0, nit may exceed maxiter.
            # See https://github.com/scipy/scipy/issues/7854.
            n_iter_i = min(info['nit'], max_iter)
        elif solver == 'newton-cg':
            if daal_ready:

                def make_ncg_funcs(f,
                                   value=False,
                                   gradient=False,
                                   hessian=False):
                    daal_penaltyL2 = 0.5 / C / C_daal_multiplier
                    _obj_, X_, y_, n_samples = daal_extra_args_func(
                        classes.size,
                        w0,
                        X,
                        target,
                        0.,
                        daal_penaltyL2,
                        fit_intercept,
                        value=value,
                        gradient=gradient,
                        hessian=hessian)
                    _func_ = lambda x, *args: f(x, _obj_, *args)
                    return _func_, (X_, y_, n_samples, daal_penaltyL2)

                loss_func, extra_args = make_ncg_funcs(func, value=True)
                grad_func, _ = make_ncg_funcs(grad, gradient=True)
                grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
                w0, n_iter_i = newton_cg(grad_hess_func,
                                         loss_func,
                                         grad_func,
                                         w0,
                                         args=extra_args,
                                         maxiter=max_iter,
                                         tol=tol)
            else:
                args = (X, target, 1. / C, sample_weight)
                w0, n_iter_i = newton_cg(hess,
                                         func,
                                         grad,
                                         w0,
                                         args=args,
                                         maxiter=max_iter,
                                         tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X,
                target,
                C,
                fit_intercept,
                intercept_scaling,
                None,
                penalty,
                dual,
                verbose,
                max_iter,
                tol,
                random_state,
                sample_weight=sample_weight)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()

        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(X.dtype, copy=False)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio

            w0, n_iter_i, warm_start_sag = sag_solver(
                X,
                target,
                sample_weight,
                loss,
                alpha,
                beta,
                max_iter,
                tol,
                verbose,
                random_state,
                False,
                max_squared_sum,
                warm_start_sag,
                is_saga=(solver == 'saga'))

        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            if daal_ready:
                if classes.size == 2:
                    multi_w0 = w0[np.newaxis, :]
                else:
                    multi_w0 = np.reshape(w0, (classes.size, -1))
            else:
                n_classes = max(2, classes.size)
                multi_w0 = np.reshape(w0, (n_classes, -1))
                if n_classes == 2:
                    multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(np.require(multi_w0, requirements='O'))
        else:
            coefs.append(np.require(w0, requirements='O'))

        n_iter[i] = n_iter_i

    if daal_ready:
        if fit_intercept:
            for i, ci in enumerate(coefs):
                coefs[i] = np.roll(ci, -1, -1)
        else:
            for i, ci in enumerate(coefs):
                coefs[i] = np.delete(ci, 0, axis=-1)

    return np.array(coefs), np.array(Cs), n_iter
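
The docstring's point about reusing the previous solution along the Cs path can also be illustrated with scikit-learn's public estimator: with warm_start=True, each refit after changing C starts from the current coefficients instead of zeros, which is the same warm-starting idea the path function exploits. A small sketch on illustrative data (the daal4py-specific branches above are not involved):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
Cs = np.logspace(-4, 4, 10)      # same grid the path function builds from an integer Cs

clf = LogisticRegression(solver='lbfgs', warm_start=True, max_iter=100)
coefs = []
for C in Cs:
    clf.set_params(C=C)
    clf.fit(X, y)                # warm-started from the previous C's solution
    coefs.append(clf.coef_.copy())
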
Example #9
def logistic_regression(X,
                        y,
                        fit_intercept=True,
                        C=1e4,
                        max_iter=100,
                        tol=1e-4,
                        verbose=0,
                        solver='lbfgs',
                        coef=None,
                        class_weight=None,
                        dual=False,
                        penalty='l2',
                        intercept_scaling=1.,
                        random_state=None,
                        check_input=True,
                        max_squared_sum=None,
                        sample_weight=None):
    """Compute a Logistic Regression for possibly soft class labels y

    Based on logistic_regression_path, but assumes multinomial and removes multiple Cs logic

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    C : float
        regularization parameter that should be used. Default is 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coef : ndarray, shape (n_classes, n_features) or (n_classes, n_features + 1,)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    """

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        X = check_array(X,
                        accept_sparse='csr',
                        dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    random_state = check_random_state(random_state)

    if len(y.shape) == 1:
        le = LabelBinarizer()
        y = le.fit_transform(y).astype(X.dtype, copy=False)

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
        check_consistent_length(y, sample_weight)
    else:
        sample_weight = np.ones(X.shape[0], dtype=X.dtype)

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    class_weight_ = compute_class_weight(class_weight, y)
    sample_weight *= class_weight_

    Y_multi = y
    nclasses = Y_multi.shape[1]

    w0 = np.zeros((nclasses, n_features + int(fit_intercept)),
                  order='F',
                  dtype=X.dtype)

    if coef is not None:
        # it must work both giving the bias term and not

        # For binary problems coef.shape[0] should be 1, otherwise it
        # should be nclasses.
        if nclasses == 2:
            nclasses = 1

        if (coef.shape[0] != nclasses
                or coef.shape[1] not in (n_features, n_features + 1)):
            raise ValueError(
                'Initialization coef is of shape (%d, %d), expected '
                'shape (%d, %d) or (%d, %d)' %
                (coef.shape[0], coef.shape[1], nclasses, n_features, nclasses,
                 n_features + 1))

        if nclasses == 1:
            w0[0, :coef.shape[1]] = -coef
            w0[1, :coef.shape[1]] = coef
        else:
            w0[:, :coef.shape[1]] = coef

    # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
    if solver in ['lbfgs', 'newton-cg']:
        w0 = w0.ravel()
    target = Y_multi
    warm_start_sag = {'coef': w0.T}

    if solver == 'lbfgs':
        func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        iprint = [-1, 50, 1, 100, 101][np.searchsorted(np.array([0, 1, 2, 3]),
                                                       verbose)]

        w0, loss, info = optimize.fmin_l_bfgs_b(func,
                                                w0,
                                                fprime=None,
                                                args=(X, target, 1. / C,
                                                      sample_weight),
                                                iprint=iprint,
                                                pgtol=tol,
                                                maxiter=max_iter)
        if info["warnflag"] == 1:
            warnings.warn(
                "lbfgs failed to converge. Increase the number "
                "of iterations.", ConvergenceWarning)

    elif solver == 'newton-cg':
        func = lambda x, *args: _multinomial_loss(x, *args)[0]
        grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
        hess = _multinomial_grad_hess

        args = (X, target, 1. / C, sample_weight)
        w0, n_iter_i = newton_cg(hess,
                                 func,
                                 grad,
                                 w0,
                                 args=args,
                                 maxiter=max_iter,
                                 tol=tol)

    elif solver == 'liblinear':
        coef_, intercept_, n_iter_i, = _fit_liblinear(
            X,
            target,
            C,
            fit_intercept,
            intercept_scaling,
            None,
            penalty,
            dual,
            verbose,
            max_iter,
            tol,
            random_state,
            sample_weight=sample_weight)
        if fit_intercept:
            w0 = np.concatenate([coef_.ravel(), intercept_])
        else:
            w0 = coef_.ravel()

    elif solver in ['sag', 'saga']:
        target = target.astype(np.float64)
        loss = 'multinomial'
        if penalty == 'l1':
            alpha = 0.
            beta = 1. / C
        else:
            alpha = 1. / C
            beta = 0.
        w0, n_iter_i, warm_start_sag = sag_solver(X,
                                                  target,
                                                  sample_weight,
                                                  loss,
                                                  alpha,
                                                  beta,
                                                  max_iter,
                                                  tol,
                                                  verbose,
                                                  random_state,
                                                  False,
                                                  max_squared_sum,
                                                  warm_start_sag,
                                                  is_saga=(solver == 'saga'))

    else:
        raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                         "'newton-cg', 'sag'}, got '%s' instead" % solver)

    nclasses = max(2, nclasses)
    multi_w0 = np.reshape(w0, (nclasses, -1))
    if nclasses == 2:
        multi_w0 = multi_w0[1][np.newaxis, :]
    coef = multi_w0.copy()

    return coef
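
The class_weight='balanced' rule quoted in the docstring, n_samples / (n_classes * np.bincount(y)), is easy to reproduce directly; a small check on illustrative labels against scikit-learn's helper:

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0, 0, 0, 0, 1, 1])    # imbalanced toy labels
classes = np.unique(y)

manual = len(y) / (len(classes) * np.bincount(y))          # [0.75, 1.5]
balanced = compute_class_weight('balanced', classes=classes, y=y)
np.testing.assert_allclose(manual, balanced)
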
Example #10
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None, copy=False,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='ovr',
                             random_state=None, check_input=True,
                             max_squared_sum=None, sample_weight=None, rho=None, q=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,)
        Input data, target values.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    copy : bool, default False
        Whether or not to produce a copy of the data. A copy is not required
        anymore. This parameter is deprecated and will be removed in 0.19.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties.

    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iterations for each C in Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    """
    if copy:
        warnings.warn("A copy is not required anymore. The 'copy' parameter "
                      "is deprecated and will be removed in 0.19.",
                      DeprecationWarning)

    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    _check_solver_option(solver, multi_class, penalty, dual)

    # Preprocessing.
    if check_input or copy:
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    random_state = check_random_state(random_state)

    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
        check_consistent_length(y, sample_weight)
    else:
        sample_weight = np.ones(X.shape[0])

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()

    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        if solver == "liblinear":
            if classes.size == 2:
                # Reconstruct the weights with keys 1 and -1
                temp = {1: class_weight[pos_class],
                        -1: class_weight[classes[0]]}
                class_weight = temp.copy()
            else:
                raise ValueError("In LogisticRegressionCV the liblinear "
                                 "solver cannot handle multiclass with "
                                 "class_weight of type dict. Use the lbfgs, "
                                 "newton-cg or sag solvers or set "
                                 "class_weight='balanced'")
        else:
            class_weight_ = compute_class_weight(class_weight, classes, y)
            sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept))
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=np.float64)
        y_bin[~mask] = -1.
        # for compute_class_weight

        # 'auto' is deprecated and will be removed in 0.19
        if class_weight in ("auto", "balanced"):
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]

    else:
        lbin = LabelBinarizer()
        Y_binarized = lbin.fit_transform(y)
        if Y_binarized.shape[1] == 1:
            Y_binarized = np.hstack([1 - Y_binarized, Y_binarized])
        w0 = np.zeros((Y_binarized.shape[1], n_features + int(fit_intercept)),
                      order='F')

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_vectors = classes.size
            if n_vectors == 2:
                n_vectors = 1

            if (coef.shape[0] != n_vectors or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            w0[:, :coef.shape[1]] = coef

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        w0 = w0.ravel()
        target = Y_binarized
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = lambda *args: _logistic_loss_and_grad(rho=rho, q=q, *args)
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess

    coefs = list()
    warm_start_sag = {'coef': w0}
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            try:
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
            except TypeError:
                # old scipy doesn't have maxiter
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol)
            if info["warnflag"] == 1 and verbose > 0:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except:
                n_iter_i = info['funcalls'] - 1
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
                                     maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, class_weight,
                penalty, dual, verbose, max_iter, tol, random_state)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()

        elif solver == 'sag':
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, 'log', 1. / C, max_iter, tol,
                verbose, random_state, False, max_squared_sum,
                warm_start_sag)
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            multi_w0 = np.reshape(w0, (classes.size, -1))
            if classes.size == 2:
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0)
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i

    return coefs, np.array(Cs), n_iter
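
For the 'ovr' branch, the labels are first masked into a +/-1 target for the chosen positive class before the binary solvers are called. A minimal sketch of that masking step on illustrative labels (the variable names mirror the code above; nothing else is assumed):

import numpy as np

y = np.array([0, 2, 2, 0, 2])
classes = np.unique(y)       # [0, 2]; classes[1] is the default pos_class
pos_class = classes[1]

mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.           # everything that is not pos_class becomes -1
# y_bin is now [-1.,  1.,  1., -1.,  1.]
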
Example #11
    def fit(self, X, Y, sample_weight=None):
        """
        Fit the weighted model.

        Parameters
        ----------
        X : design matrix
        Y : response matrix
        sample_weight : sample weight vector
        """
        
        if sample_weight is None:
            sample_weight = np.ones((X.shape[0],))
        
        assert X.shape[0] == Y.shape[0]
        assert X.shape[0] == sample_weight.shape[0]
        
        
        if X.ndim == 1:
            X = X.reshape(-1,1)
        if self.fit_intercept:
            X = addIntercept(X)
        self.n_samples = X.shape[0]
        self.n_features = X.shape[1]
        self.n_targets = Y.shape[1]
        
        if self.n_targets < 2:
            raise ValueError('n_targets < 2')
        
        w0 = np.zeros((self.n_targets*self.n_features, ))

        if self.solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        else:
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess

        if self.solver == 'lbfgs':
            try:
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, Y, self.reg, sample_weight),
                    iprint=0, pgtol=self.tol, maxiter=self.max_iter)
            except TypeError:
                # old scipy doesn't have maxiter
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, Y, self.reg, sample_weight),
                    iprint=0, pgtol=self.tol)
            if info["warnflag"] == 1:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except:
                n_iter_i = info['funcalls'] - 1
        else:
            args = (X, Y, self.reg, sample_weight)
            w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
                                     maxiter=self.max_iter, tol=self.tol)

        w1 = w0.reshape(self.n_targets, -1)
        w1 = w1.T - w1.T[:,0].reshape(-1,1)
        self.coef = w1
        self.converged = n_iter_i < self.max_iter
        self.dispersion = self.estimate_dispersion()
        if self.est_sd:
            self.sd = self.estimate_sd(X, sample_weight)
        self.ll = self.estimate_loglikelihood(X, Y, sample_weight)
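
The final reshaping step subtracts the first class's weight vector from every class, which pins that class at zero without changing the fitted model: softmax probabilities are invariant to adding the same vector to all class scores. A quick check of that invariance, assuming only NumPy/SciPy:

import numpy as np
from scipy.special import softmax

rng = np.random.RandomState(0)
W = rng.randn(4, 3)              # n_features x n_targets weight matrix
x = rng.randn(4)

W_shifted = W - W[:, [0]]        # subtract the first class's weights from every class
np.testing.assert_allclose(softmax(x.dot(W)), softmax(x.dot(W_shifted)))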