def test_binomial_and_multinomial_loss(global_random_seed):
    """Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 20
    binom = HalfBinomialLoss()
    multinom = HalfMultinomialLoss(n_classes=2)
    y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
    raw_prediction = rng.normal(size=n_samples)
    raw_multinom = np.empty((n_samples, 2))
    raw_multinom[:, 0] = -0.5 * raw_prediction
    raw_multinom[:, 1] = 0.5 * raw_prediction
    assert_allclose(
        binom.loss(y_true=y_train, raw_prediction=raw_prediction),
        multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
    )
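
# The symmetric encoding above works because, for a raw score z,
# softmax([-z/2, z/2])[1] = exp(z/2) / (exp(-z/2) + exp(z/2)) = 1 / (1 + exp(-z))
# = sigmoid(z), so both losses see the same class-1 probability. Below is a
# minimal standalone sketch of that identity (illustrative only, not part of
# the test suite; the helper name is made up and scipy >= 1.2 is assumed for
# ``softmax``).


def _sketch_symmetric_encoding_identity(z=1.7):
    from scipy.special import expit, softmax

    p_binom = expit(z)                          # sigmoid of the binomial raw score
    p_multinom = softmax([-0.5 * z, 0.5 * z])[1]  # class-1 prob of the symmetric encoding
    assert np.isclose(p_binom, p_multinom)
    return p_binom
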
def test_multinomial_loss():
    # test if the multinomial loss and gradient computations are consistent
    X, y = iris.data, iris.target.astype(np.float64)
    n_samples, n_features = X.shape
    n_classes = len(np.unique(y))

    rng = check_random_state(42)
    weights = rng.randn(n_features, n_classes)
    intercept = rng.randn(n_classes)
    sample_weights = rng.randn(n_samples)
    # take absolute values in place so the sample weights are non-negative
    np.abs(sample_weights, sample_weights)

    # compute loss and gradient like in multinomial SAG
    dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
    loss_1, grad_1 = _multinomial_grad_loss_all_samples(
        dataset, weights, intercept, n_samples, n_features, n_classes
    )

    # compute loss and gradient like in multinomial LogisticRegression
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes),
        fit_intercept=True,
    )
    weights_intercept = np.vstack((weights, intercept)).T
    loss_2, grad_2 = loss.loss_gradient(
        weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights
    )
    # drop the intercept column and transpose to (n_features, n_classes) so the
    # layout matches grad_1
    grad_2 = grad_2[:, :-1].T

    # comparison
    assert_array_almost_equal(grad_1, grad_2)
    assert_almost_equal(loss_1, loss_2)
def test_multinomial_coef_shape(fit_intercept):
    """Test that multinomial LinearModelLoss respects shape of coef."""
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept
    )
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss, n_samples=n_samples, n_features=n_features, seed=42
    )
    s = np.random.RandomState(42).randn(*coef.shape)

    l, g = loss.loss_gradient(coef, X, y)
    g1 = loss.gradient(coef, X, y)
    g2, hessp = loss.gradient_hessian_product(coef, X, y)
    h = hessp(s)
    assert g.shape == coef.shape
    assert h.shape == coef.shape
    assert_allclose(g, g1)
    assert_allclose(g, g2)

    coef_r = coef.ravel(order="F")
    s_r = s.ravel(order="F")
    l_r, g_r = loss.loss_gradient(coef_r, X, y)
    g1_r = loss.gradient(coef_r, X, y)
    g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y)
    h_r = hessp_r(s_r)
    assert g_r.shape == coef_r.shape
    assert h_r.shape == coef_r.shape
    assert_allclose(g_r, g1_r)
    assert_allclose(g_r, g2_r)

    assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F"))
    assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F"))
def test_multinomial_loss_fit_intercept_only():
    """Test that fit_intercept_only returns the mean functional for CCE."""
    rng = np.random.RandomState(0)
    n_classes = 4
    loss = HalfMultinomialLoss(n_classes=n_classes)
    # Same logic as test_specific_fit_intercept_only. Here inverse link
    # function = softmax and link function = log - symmetry term.
    y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
    baseline_prediction = loss.fit_intercept_only(y_true=y_train)
    assert baseline_prediction.shape == (n_classes,)
    p = np.zeros(n_classes, dtype=y_train.dtype)
    for k in range(n_classes):
        p[k] = (y_train == k).mean()
    assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
    assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))

    for y_train in (np.zeros(shape=10), np.ones(shape=10)):
        y_train = y_train.astype(np.float64)
        baseline_prediction = loss.fit_intercept_only(y_true=y_train)
        assert baseline_prediction.dtype == y_train.dtype
        assert_all_finite(baseline_prediction)
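
# For reference, the symmetric multinomial logit link checked above maps class
# probabilities p to log(p) minus the mean of log(p) over classes, so the
# resulting raw predictions sum to zero across classes. A small illustrative
# computation (the helper name is made up and not part of the test suite):


def _sketch_symmetric_log_link(p=None):
    if p is None:
        p = np.array([0.1, 0.2, 0.3, 0.4])
    raw = np.log(p) - np.mean(np.log(p))
    # the symmetric parametrization is zero-mean across classes
    assert np.isclose(raw.sum(), 0.0)
    return raw
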
def test_multinomial_loss_ground_truth():
    # n_samples, n_features, n_classes = 4, 2, 3
    n_classes = 3
    X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
    y = np.array([0, 1, 2, 0], dtype=np.float64)
    lbin = LabelBinarizer()
    Y_bin = lbin.fit_transform(y)

    weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
    intercept = np.array([1.0, 0, -0.2])
    sample_weights = np.array([0.8, 1, 1, 0.8])

    # reference: sample-weighted categorical cross-entropy computed directly
    # from the softmax log-probabilities p
    prediction = np.dot(X, weights) + intercept
    logsumexp_prediction = logsumexp(prediction, axis=1)
    p = prediction - logsumexp_prediction[:, np.newaxis]
    loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
    diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
    grad_1 = np.dot(X.T, diff)

    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes),
        fit_intercept=True,
    )
    weights_intercept = np.vstack((weights, intercept)).T
    loss_2, grad_2 = loss.loss_gradient(
        weights_intercept, X, y, l2_reg_strength=0.0, sample_weight=sample_weights
    )
    # drop the intercept column and transpose to (n_features, n_classes)
    grad_2 = grad_2[:, :-1].T

    assert_almost_equal(loss_1, loss_2)
    assert_array_almost_equal(grad_1, grad_2)

    # ground truth
    loss_gt = 11.680360354325961
    grad_gt = np.array(
        [[-0.557487, -1.619151, +2.176638], [-0.903942, +5.258745, -4.354803]]
    )
    assert_almost_equal(loss_1, loss_gt)
    assert_array_almost_equal(grad_1, grad_gt)
    (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
    (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
]

# y_pred and y_true do not always have the same domain (valid value range).
# Hence, we define extra sets of parameters for each of them.
Y_TRUE_PARAMS = [  # type: ignore
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [0], []),
    (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLoss(power=0), [-100, 0], []),
    (HalfTweedieLoss(power=1.5), [0], []),
    (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=0), [-100, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [0], []),
    (HalfBinomialLoss(), [0, 1], []),
    (HalfMultinomialLoss(), [0.0, 1.0, 2], []),
]
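
# A minimal sketch of how tuples of the form (loss, [y success], [y fail]) can
# be consumed in a parametrized boundary test, assuming ``pytest`` and ``numpy``
# are imported at module level. The use of ``in_y_true_range`` mirrors the loss
# API used elsewhere in scikit-learn's loss tests; if that helper is not
# available in a given version, checking that ``loss.loss`` accepts or rejects
# the values is an alternative. The test name itself is hypothetical.


@pytest.mark.parametrize("loss, y_success, y_fail", Y_TRUE_PARAMS)
def test_y_true_boundaries_sketch(loss, y_success, y_fail):
    for y in y_success:
        # values declared valid for y_true should pass the range check
        assert loss.in_y_true_range(np.array([float(y)]))
    for y in y_fail:
        # values declared invalid for y_true should fail the range check
        assert not loss.in_y_true_range(np.array([float(y)]))
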
def __logistic_regression_path(
    X, y, pos_class=None, Cs=10, fit_intercept=True,
    max_iter=100, tol=1e-4, verbose=0, solver='lbfgs', coef=None,
    class_weight=None, dual=False, penalty='l2', intercept_scaling=1.,
    multi_class='warn', random_state=None, check_input=True,
    max_squared_sum=None, sample_weight=None, l1_ratio=None, n_threads=1,
):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss
        fit across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape (n_samples,), optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multi_class='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.

    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        if sklearn_check_version('1.1'):
            X = check_array(
                X,
                accept_sparse='csr',
                dtype=np.float64,
                accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
            )
        else:
            X = check_array(
                X,
                accept_sparse='csr',
                dtype=np.float64,
                accept_large_sparse=solver != 'liblinear',
            )
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    classes = np.unique(y)
    random_state = check_random_state(random_state)

    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if classes.size > 2:
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    _patching_status = PatchingConditionsChain(
        "sklearn.linear_model.LogisticRegression.fit")
    _dal_ready = _patching_status.and_conditions([
        (solver in ['lbfgs', 'newton-cg'],
         f"'{solver}' solver is not supported. "
         "Only 'lbfgs' and 'newton-cg' solvers are supported."),
        (not sparse.issparse(X), "X is sparse. Sparse input is not supported."),
        (sample_weight is None, "Sample weights are not supported."),
        (class_weight is None, "Class weights are not supported.")
    ])

    if not _dal_ready:
        if sklearn_check_version('0.24'):
            sample_weight = _check_sample_weight(
                sample_weight, X, dtype=X.dtype, copy=True)
        else:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if (isinstance(class_weight, dict) or
            multi_class == 'multinomial') and not _dal_ready:
        class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
        if not np.allclose(class_weight_, np.ones_like(class_weight_)):
            sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. For the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        y_bin = np.ones(y.shape, dtype=X.dtype)
        if sklearn_check_version('1.1'):
            mask = (y == pos_class)
            y_bin = np.ones(y.shape, dtype=X.dtype)
            # for compute_class_weight
            if solver in ["lbfgs", "newton-cg"]:
                # HalfBinomialLoss, used for those solvers, represents y in
                # [0, 1] instead of in [-1, 1].
                mask_classes = np.array([0, 1])
                y_bin[~mask] = 0.0
            else:
                mask_classes = np.array([-1, 1])
                y_bin[~mask] = -1.0
        else:
            mask_classes = np.array([-1, 1])
            mask = (y == pos_class)
            y_bin[~mask] = -1.
            # for compute_class_weight

        if class_weight == "balanced" and not _dal_ready:
            class_weight_ = compute_class_weight(
                class_weight, classes=mask_classes, y=y_bin)
            if not np.allclose(class_weight_, np.ones_like(class_weight_)):
                sample_weight *= class_weight_[le.fit_transform(y_bin)]

        if _dal_ready:
            w0 = np.zeros(n_features + 1, dtype=X.dtype)
            y_bin[~mask] = 0.
        else:
            w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
    else:
        if sklearn_check_version('1.1'):
            if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
                # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
                # not LabelBinarizer, i.e. y as a 1d-array of integers.
                # LabelEncoder also saves memory compared to LabelBinarizer,
                # especially when n_classes is large.
                if _dal_ready:
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
                else:
                    le = LabelEncoder()
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
            else:
                # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot
                # encoded.
                lbin = LabelBinarizer()
                Y_multi = lbin.fit_transform(y)
                if Y_multi.shape[1] == 1:
                    Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            if solver not in ['sag', 'saga']:
                if _dal_ready:
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
                else:
                    lbin = LabelBinarizer()
                    Y_multi = lbin.fit_transform(y)
                    if Y_multi.shape[1] == 1:
                        Y_multi = np.hstack([1 - Y_multi, Y_multi])
            else:
                # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
                le = LabelEncoder()
                Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)

        if _dal_ready:
            w0 = np.zeros((classes.size, n_features + 1),
                          order='C', dtype=X.dtype)
        else:
            w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                          order='F', dtype=X.dtype)

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            if _dal_ready:
                w0[-coef.size:] = \
                    np.roll(coef, 1, -1) if coef.size != n_features else coef
            else:
                w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1

            if coef.shape[0] != n_classes or \
                    coef.shape[1] not in (n_features, n_features + 1):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' %
                    (coef.shape[0], coef.shape[1], classes.size,
                     n_features, classes.size, n_features + 1))

            if _dal_ready:
                w0[:, -coef.shape[1]:] = \
                    np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
            else:
                if n_classes == 1:
                    w0[0, :coef.shape[1]] = -coef
                    w0[1, :coef.shape[1]] = coef
                else:
                    w0[:, :coef.shape[1]] = coef

    C_daal_multiplier = 1
    # commented out because this is Py3 feature
    # def _map_to_binary_logistic_regression():
    #     nonlocal C_daal_multiplier
    #     nonlocal w0
    #     C_daal_multiplier = 2
    #     w0 *= 2

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            if _dal_ready and classes.size == 2:
                w0 = w0[-1:, :]
            if sklearn_check_version('1.1'):
                w0 = w0.ravel(order="F")
            else:
                w0 = w0.ravel()
        target = Y_multi
        loss = None
        if sklearn_check_version('1.1'):
            loss = LinearModelLoss(
                base_loss=HalfMultinomialLoss(n_classes=classes.size),
                fit_intercept=fit_intercept,
            )
        if solver == 'lbfgs':
            if _dal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_and_grad
            else:
                if sklearn_check_version('1.1') and loss is not None:
                    func = loss.loss_gradient
                else:
                    def func(x, *args):
                        return _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            if _dal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                if sklearn_check_version('1.1') and loss is not None:
                    func = loss.loss
                    grad = loss.gradient
                    hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
                else:
                    def func(x, *args):
                        return _multinomial_loss(x, *args)[0]

                    def grad(x, *args):
                        return _multinomial_loss_grad(x, *args)[1]

                    hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            if _dal_ready:
                func = _daal4py_loss_and_grad
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
            else:
                if sklearn_check_version('1.1'):
                    loss = LinearModelLoss(
                        base_loss=HalfBinomialLoss(),
                        fit_intercept=fit_intercept)
                    func = loss.loss_gradient
                else:
                    func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            if _dal_ready:
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                if sklearn_check_version('1.1'):
                    loss = LinearModelLoss(
                        base_loss=HalfBinomialLoss(),
                        fit_intercept=fit_intercept)
                    func = loss.loss
                    grad = loss.gradient
                    hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
                else:
                    func = _logistic_loss

                    def grad(x, *args):
                        return _logistic_loss_and_grad(x, *args)[1]

                    hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}

    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            if _dal_ready:
                extra_args = daal_extra_args_func(
                    classes.size, w0, X, target, 0.,
                    1. / (2 * C * C_daal_multiplier),
                    fit_intercept, value=True, gradient=True, hessian=False)
            else:
                if sklearn_check_version('1.1'):
                    l2_reg_strength = 1.0 / C
                    extra_args = (X, target, sample_weight, l2_reg_strength,
                                  n_threads)
                else:
                    extra_args = (X, target, 1. / C, sample_weight)

            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            opt_res = optimize.minimize(
                func, w0, method="L-BFGS-B", jac=True, args=extra_args,
                options={"iprint": iprint, "gtol": tol, "maxiter": max_iter})
            n_iter_i = _check_optimize_result(
                solver, opt_res, max_iter,
                extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
            w0, loss = opt_res.x, opt_res.fun
            if _dal_ready and C_daal_multiplier == 2:
                w0 /= 2
        elif solver == 'newton-cg':
            if _dal_ready:
                def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
                    daal_penaltyL2 = 1. / (2 * C * C_daal_multiplier)
                    _obj_, X_, y_, n_samples = daal_extra_args_func(
                        classes.size, w0, X, target, 0., daal_penaltyL2,
                        fit_intercept, value=value, gradient=gradient,
                        hessian=hessian)

                    def _func_(x, *args):
                        return f(x, _obj_, *args)

                    return _func_, (X_, y_, n_samples, daal_penaltyL2)

                loss_func, extra_args = make_ncg_funcs(func, value=True)
                grad_func, _ = make_ncg_funcs(grad, gradient=True)
                grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
                w0, n_iter_i = _newton_cg(
                    grad_hess_func, loss_func, grad_func, w0,
                    args=extra_args, maxiter=max_iter, tol=tol)
            else:
                if sklearn_check_version('1.1'):
                    l2_reg_strength = 1.0 / C
                    args = (X, target, sample_weight, l2_reg_strength, n_threads)
                else:
                    args = (X, target, 1. / C, sample_weight)
                w0, n_iter_i = _newton_cg(
                    hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, None, penalty,
                dual, verbose, max_iter, tol, random_state,
                sample_weight=sample_weight,
            )
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(X.dtype, copy=False)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, loss, alpha, beta, max_iter, tol,
                verbose, random_state, False, max_squared_sum, warm_start_sag,
                is_saga=(solver == 'saga'))
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            if _dal_ready:
                if classes.size == 2:
                    multi_w0 = w0[np.newaxis, :]
                else:
                    multi_w0 = np.reshape(w0, (classes.size, -1))
            else:
                if sklearn_check_version('1.1'):
                    if solver in ["lbfgs", "newton-cg"]:
                        multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
                    else:
                        multi_w0 = w0
                else:
                    n_classes = max(2, classes.size)
                    multi_w0 = np.reshape(w0, (n_classes, -1))
                    if n_classes == 2:
                        multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0.copy())
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i

    if _dal_ready:
        if fit_intercept:
            for i, ci in enumerate(coefs):
                coefs[i] = np.roll(ci, -1, -1)
        else:
            for i, ci in enumerate(coefs):
                coefs[i] = np.delete(ci, 0, axis=-1)

    _patching_status.write_log()
    return np.array(coefs), np.array(Cs), n_iter
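

# A minimal usage sketch for the path function above, assuming this module's
# imports are available. The toy data comes from sklearn.datasets; the chosen
# grid of C values and the variable names are illustrative only, and the
# shapes are only inspected, not asserted (see the docstring for the
# documented layouts).
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X_demo, y_demo = load_iris(return_X_y=True)
    coefs_demo, cs_demo, n_iter_demo = __logistic_regression_path(
        X_demo, y_demo, Cs=[0.1, 1.0, 10.0], solver='lbfgs',
        multi_class='multinomial', max_iter=200)
    # one coefficient array per C value; the intercept is appended as an extra
    # column when fit_intercept=True
    print(np.asarray(coefs_demo).shape, cs_demo, n_iter_demo)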