Example No. 1
def test_multinomial_loss():
    # test if the multinomial loss and gradient computations are consistent
    X, y = iris.data, iris.target.astype(np.float64)
    n_samples, n_features = X.shape
    n_classes = len(np.unique(y))

    rng = check_random_state(42)
    weights = rng.randn(n_features, n_classes)
    intercept = rng.randn(n_classes)
    sample_weights = rng.randn(n_samples)
    np.abs(sample_weights, sample_weights)  # in-place absolute value: make the weights non-negative

    # compute loss and gradient like in multinomial SAG
    dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
    loss_1, grad_1 = _multinomial_grad_loss_all_samples(
        dataset, weights, intercept, n_samples, n_features, n_classes)
    # compute loss and gradient like in multinomial LogisticRegression
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes),
        fit_intercept=True,
    )
    weights_intercept = np.vstack((weights, intercept)).T
    loss_2, grad_2 = loss.loss_gradient(weights_intercept,
                                        X,
                                        y,
                                        l2_reg_strength=0.0,
                                        sample_weight=sample_weights)
    grad_2 = grad_2[:, :-1].T

    # comparison
    assert_array_almost_equal(grad_1, grad_2)
    assert_almost_equal(loss_1, loss_2)
Example No. 2
def test_multinomial_coef_shape(fit_intercept):
    """Test that multinomial LinearModelLoss respects shape of coef."""
    loss = LinearModelLoss(base_loss=HalfMultinomialLoss(),
                           fit_intercept=fit_intercept)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(linear_model_loss=loss,
                                 n_samples=n_samples,
                                 n_features=n_features,
                                 seed=42)
    s = np.random.RandomState(42).randn(*coef.shape)

    l, g = loss.loss_gradient(coef, X, y)
    g1 = loss.gradient(coef, X, y)
    g2, hessp = loss.gradient_hessian_product(coef, X, y)
    h = hessp(s)
    assert g.shape == coef.shape
    assert h.shape == coef.shape
    assert_allclose(g, g1)
    assert_allclose(g, g2)

    coef_r = coef.ravel(order="F")
    s_r = s.ravel(order="F")
    l_r, g_r = loss.loss_gradient(coef_r, X, y)
    g1_r = loss.gradient(coef_r, X, y)
    g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y)
    h_r = hessp_r(s_r)
    assert g_r.shape == coef_r.shape
    assert h_r.shape == coef_r.shape
    assert_allclose(g_r, g1_r)
    assert_allclose(g_r, g2_r)

    assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F"))
    assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F"))
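The final two assertions rely on nothing more than NumPy's Fortran-order ravel/reshape round trip. A minimal NumPy-only sketch of that invariant (illustrative shapes, not tied to any sklearn object):

import numpy as np

# Hypothetical shapes: 3 classes, 6 coefficients per class (5 features + intercept).
n_classes, n_dof = 3, 6
coef_2d = np.arange(n_classes * n_dof, dtype=float).reshape(n_classes, n_dof)
coef_1d = coef_2d.ravel(order="F")  # column-major flattening, as used in the test above
# reshape(..., order="F") recovers the original (n_classes, n_dof) layout
np.testing.assert_array_equal(coef_1d.reshape(n_classes, -1, order="F"), coef_2d)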
Example No. 3
def test_loss_gradients_hessp_intercept(base_loss, sample_weight,
                                        l2_reg_strength, X_sparse):
    """Test that loss and gradient handle intercept correctly."""
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=False)
    loss_inter = LinearModelLoss(base_loss=base_loss(), fit_intercept=True)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(linear_model_loss=loss,
                                 n_samples=n_samples,
                                 n_features=n_features,
                                 seed=42)

    X[:, -1] = 1  # set the last column to 1 to mimic the intercept term
    X_inter = X[:, :-1]  # exclude the intercept column; loss_inter adds it automatically

    if X_sparse:
        X = sparse.csr_matrix(X)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    l, g = loss.loss_gradient(coef,
                              X,
                              y,
                              sample_weight=sample_weight,
                              l2_reg_strength=l2_reg_strength)
    _, hessp = loss.gradient_hessian_product(coef,
                                             X,
                                             y,
                                             sample_weight=sample_weight,
                                             l2_reg_strength=l2_reg_strength)
    l_inter, g_inter = loss_inter.loss_gradient(
        coef,
        X_inter,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=l2_reg_strength)
    _, hessp_inter = loss_inter.gradient_hessian_product(
        coef,
        X_inter,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=l2_reg_strength)

    # Note that the intercept gets no L2 penalty.
    assert l == pytest.approx(l_inter +
                              0.5 * l2_reg_strength * squared_norm(coef.T[-1]))

    g_inter_corrected = g_inter
    g_inter_corrected.T[-1] += l2_reg_strength * coef.T[-1]
    assert_allclose(g, g_inter_corrected)

    s = np.random.RandomState(42).randn(*coef.shape)
    h = hessp(s)
    h_inter = hessp_inter(s)
    h_inter_corrected = h_inter
    h_inter_corrected.T[-1] += l2_reg_strength * s.T[-1]
    assert_allclose(h, h_inter_corrected)
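The test above leans on a standard identity: appending a column of ones to X and folding the intercept into the coefficient vector yields the same raw predictions as an explicit intercept. A short NumPy-only sketch of that identity (illustrative values):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(10, 5)
w = rng.randn(5)
b = 0.7  # intercept
X_ones = np.hstack([X, np.ones((X.shape[0], 1))])  # last column of ones
# explicit intercept vs. intercept folded into the last coefficient
np.testing.assert_allclose(X @ w + b, X_ones @ np.r_[w, b])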
Example No. 4
def test_multinomial_loss_ground_truth():
    # n_samples, n_features, n_classes = 4, 2, 3
    n_classes = 3
    X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
    y = np.array([0, 1, 2, 0], dtype=np.float64)
    lbin = LabelBinarizer()
    Y_bin = lbin.fit_transform(y)

    weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
    intercept = np.array([1.0, 0, -0.2])
    sample_weights = np.array([0.8, 1, 1, 0.8])

    prediction = np.dot(X, weights) + intercept
    logsumexp_prediction = logsumexp(prediction, axis=1)
    p = prediction - logsumexp_prediction[:, np.newaxis]
    loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
    diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
    grad_1 = np.dot(X.T, diff)

    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes),
        fit_intercept=True,
    )
    weights_intercept = np.vstack((weights, intercept)).T
    loss_2, grad_2 = loss.loss_gradient(weights_intercept,
                                        X,
                                        y,
                                        l2_reg_strength=0.0,
                                        sample_weight=sample_weights)
    grad_2 = grad_2[:, :-1].T

    assert_almost_equal(loss_1, loss_2)
    assert_array_almost_equal(grad_1, grad_2)

    # ground truth
    loss_gt = 11.680360354325961
    grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
                        [-0.903942, +5.258745, -4.354803]])
    assert_almost_equal(loss_1, loss_gt)
    assert_array_almost_equal(grad_1, grad_gt)
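For reference, the hand-written ground truth above is just the sample-weighted multinomial negative log-likelihood and its gradient in X-space. A small sketch restating it with scipy.special.softmax (values copied from the test; the one-hot matrix plays the role of Y_bin):

import numpy as np
from scipy.special import softmax

X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1.0, 0, -0.2])
sample_weights = np.array([0.8, 1, 1, 0.8])

P = softmax(X @ weights + intercept, axis=1)  # predicted class probabilities
Y_onehot = np.eye(3)[y]
# loss = -sum_i s_i * log p_{i, y_i};  grad = X' @ (s[:, None] * (P - Y_onehot))
loss = -(sample_weights * np.log(P[np.arange(len(y)), y])).sum()
grad = X.T @ (sample_weights[:, np.newaxis] * (P - Y_onehot))
np.testing.assert_allclose(loss, 11.680360354325961)
np.testing.assert_allclose(grad, [[-0.557487, -1.619151, +2.176638],
                                  [-0.903942, +5.258745, -4.354803]], atol=2e-6)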
Example No. 5
def glm_dataset(global_random_seed, request):
    """Dataset with GLM solutions, well conditioned X.

    This is inspired by ols_ridge_dataset in test_ridge.py.

    The construction is based on the SVD decomposition of X = U S V'.

    Parameters
    ----------
    type : {"long", "wide"}
        If "long", then n_samples > n_features.
        If "wide", then n_features > n_samples.
    model : a GLM model

    For "wide", we return the minimum norm solution:

        min ||w||_2 subject to w = argmin deviance(X, y, w)

    Note that the deviance is always minimized if y = inverse_link(X w) is possible to
    achieve, which it is in the wide data case. Therefore, we can construct the
    solution with minimum norm like (wide) OLS:

        min ||w||_2 subject to link(y) = raw_prediction = X w

    Returns
    -------
    model : GLM model
    X : ndarray
        Last column of ones, i.e. intercept.
    y : ndarray
    coef_unpenalized : ndarray
        Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
        case of ambiguity)
        Last coefficient is intercept.
    coef_penalized : ndarray
        GLM solution with alpha=l2_reg_strength=1, i.e.
        min 1/n * sum(loss) + 1/2 * ||w[:-1]||_2^2.
        Last coefficient is intercept.
    l2_reg_strength : float
        Always equal to 1.
    """
    data_type, model = request.param
    # Make the larger dimension more than twice as big as the smaller one.
    # This helps when constructing singular matrices like (X, X).
    if data_type == "long":
        n_samples, n_features = 12, 4
    else:
        n_samples, n_features = 4, 12
    k = min(n_samples, n_features)
    rng = np.random.RandomState(global_random_seed)
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=k,
        tail_strength=0.1,
        random_state=rng,
    )
    X[:, -1] = 1  # last column acts as intercept
    U, s, Vt = linalg.svd(X, full_matrices=False)
    assert np.all(s > 1e-3)  # to be sure
    assert np.max(s) / np.min(s) < 100  # condition number of X

    if data_type == "long":
        coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
        coef_unpenalized *= rng.choice([-1, 1], size=n_features)
        raw_prediction = X @ coef_unpenalized
    else:
        raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
        # minimum norm solution min ||w||_2 such that raw_prediction = X w:
        # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
        coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction

    linear_loss = LinearModelLoss(base_loss=model._get_loss(),
                                  fit_intercept=True)
    sw = np.full(shape=n_samples, fill_value=1 / n_samples)
    y = linear_loss.base_loss.link.inverse(raw_prediction)

    # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
    # optimizer. Note that the problem is well conditioned such that we get accurate
    # results.
    l2_reg_strength = 1
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_with_intercept = _special_minimize(fun,
                                                      grad,
                                                      coef_unpenalized,
                                                      tol_NM=1e-6,
                                                      tol=1e-14)

    linear_loss = LinearModelLoss(base_loss=model._get_loss(),
                                  fit_intercept=False)
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_without_intercept = _special_minimize(fun,
                                                         grad,
                                                         coef_unpenalized[:-1],
                                                         tol_NM=1e-6,
                                                         tol=1e-14)

    # To be sure
    assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
        coef_unpenalized)

    return (
        model,
        X,
        y,
        coef_unpenalized,
        coef_penalized_with_intercept,
        coef_penalized_without_intercept,
        l2_reg_strength,
    )
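The "wide" branch of the fixture above computes the minimum-norm solution directly from the thin SVD, w = V s^-1 U' raw_prediction. A short sketch checking that this is exactly the pseudoinverse solution and that it attains raw_prediction exactly (illustrative shapes matching the fixture, no GLM involved):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_samples, n_features = 4, 12  # wide case
X = rng.randn(n_samples, n_features)
raw_prediction = rng.randn(n_samples)

U, s, Vt = linalg.svd(X, full_matrices=False)
w_svd = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
w_pinv = np.linalg.pinv(X) @ raw_prediction

np.testing.assert_allclose(w_svd, w_pinv, atol=1e-12)            # same minimum-norm solution
np.testing.assert_allclose(X @ w_svd, raw_prediction, atol=1e-12)  # interpolates the targets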
Example No. 6
def test_warm_start(solver, fit_intercept, global_random_seed):
    n_samples, n_features = 100, 10
    X, y = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_features - 2,
        bias=fit_intercept * 1.0,
        noise=1.0,
        random_state=global_random_seed,
    )
    y = np.abs(y)  # Poisson requires non-negative targets.
    alpha = 1
    params = {
        # "solver": solver,  # only lbfgs available
        "fit_intercept": fit_intercept,
        "tol": 1e-10,
    }

    glm1 = PoissonRegressor(warm_start=False,
                            max_iter=1000,
                            alpha=alpha,
                            **params)
    glm1.fit(X, y)

    glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
    # We intentionally set max_iter=1 so that the solver raises a
    # ConvergenceWarning.
    with pytest.warns(ConvergenceWarning):
        glm2.fit(X, y)

    linear_loss = LinearModelLoss(
        base_loss=glm1._get_loss(),
        fit_intercept=fit_intercept,
    )
    sw = np.full_like(y, fill_value=1 / n_samples)

    objective_glm1 = linear_loss.loss(
        coef=np.r_[glm1.coef_,
                   glm1.intercept_] if fit_intercept else glm1.coef_,
        X=X,
        y=y,
        sample_weight=sw,
        l2_reg_strength=alpha,
    )
    objective_glm2 = linear_loss.loss(
        coef=np.r_[glm2.coef_,
                   glm2.intercept_] if fit_intercept else glm2.coef_,
        X=X,
        y=y,
        sample_weight=sw,
        l2_reg_strength=alpha,
    )
    assert objective_glm1 < objective_glm2

    glm2.set_params(max_iter=1000)
    glm2.fit(X, y)
    # The two models are not exactly identical since the lbfgs solver
    # computes the approximate hessian from previous iterations, which
    # will not be strictly identical in the case of a warm start.
    assert_allclose(glm1.coef_, glm2.coef_, rtol=2e-4)
    assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
Example No. 7
def __logistic_regression_path(
    X,
    y,
    pos_class=None,
    Cs=10,
    fit_intercept=True,
    max_iter=100,
    tol=1e-4,
    verbose=0,
    solver='lbfgs',
    coef=None,
    class_weight=None,
    dual=False,
    penalty='l2',
    intercept_scaling=1.,
    multi_class='warn',
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    sample_weight=None,
    l1_ratio=None,
    n_threads=1,
):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.

    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.

    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape (n_samples,), optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multi_class='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iterations for each Cs.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.

    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    solver = _check_solver(solver, penalty, dual)

    # Preprocessing.
    if check_input:
        if sklearn_check_version('1.1'):
            X = check_array(
                X,
                accept_sparse='csr',
                dtype=np.float64,
                accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
            )
        else:
            X = check_array(
                X,
                accept_sparse='csr',
                dtype=np.float64,
                accept_large_sparse=solver != 'liblinear',
            )
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape

    classes = np.unique(y)
    random_state = check_random_state(random_state)

    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    _patching_status = PatchingConditionsChain(
        "sklearn.linear_model.LogisticRegression.fit")
    _dal_ready = _patching_status.and_conditions([
        (solver in ['lbfgs',
                    'newton-cg'], f"'{solver}' solver is not supported. "
         "Only 'lbfgs' and 'newton-cg' solvers are supported."),
        (not sparse.issparse(X),
         "X is sparse. Sparse input is not supported."),
        (sample_weight is None, "Sample weights are not supported."),
        (class_weight is None, "Class weights are not supported.")
    ])

    if not _dal_ready:
        if sklearn_check_version('0.24'):
            sample_weight = _check_sample_weight(sample_weight,
                                                 X,
                                                 dtype=X.dtype,
                                                 copy=True)
        else:
            sample_weight = _check_sample_weight(sample_weight,
                                                 X,
                                                 dtype=X.dtype)
    # If class_weight is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class weights are assigned after masking the labels with an OvR.
    le = LabelEncoder()
    if (isinstance(class_weight, dict) or multi_class == 'multinomial') and \
            not _dal_ready:
        class_weight_ = compute_class_weight(class_weight,
                                             classes=classes,
                                             y=y)
        if not np.allclose(class_weight_, np.ones_like(class_weight_)):
            sample_weight *= class_weight_[le.fit_transform(y)]

    # To do an OvR fit, we need to mask the labels first. For the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        y_bin = np.ones(y.shape, dtype=X.dtype)

        if sklearn_check_version('1.1'):
            mask = (y == pos_class)
            y_bin = np.ones(y.shape, dtype=X.dtype)
            # for compute_class_weight

            if solver in ["lbfgs", "newton-cg"]:
                # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
                # of in [-1, 1].
                mask_classes = np.array([0, 1])
                y_bin[~mask] = 0.0
            else:
                mask_classes = np.array([-1, 1])
                y_bin[~mask] = -1.0
        else:
            mask_classes = np.array([-1, 1])
            mask = (y == pos_class)
            y_bin[~mask] = -1.
            # for compute_class_weight

        if class_weight == "balanced" and not _dal_ready:
            class_weight_ = compute_class_weight(class_weight,
                                                 classes=mask_classes,
                                                 y=y_bin)
            if not np.allclose(class_weight_, np.ones_like(class_weight_)):
                sample_weight *= class_weight_[le.fit_transform(y_bin)]

        if _dal_ready:
            w0 = np.zeros(n_features + 1, dtype=X.dtype)
            y_bin[~mask] = 0.
        else:
            w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)

    else:
        if sklearn_check_version('1.1'):
            if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
                # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
                # not LabelBinarizer, i.e. y as a 1d-array of integers.
                # LabelEncoder also saves memory compared to LabelBinarizer, especially
                # when n_classes is large.
                if _dal_ready:
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
                else:
                    le = LabelEncoder()
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
            else:
                # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
                lbin = LabelBinarizer()
                Y_multi = lbin.fit_transform(y)
                if Y_multi.shape[1] == 1:
                    Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            if solver not in ['sag', 'saga']:
                if _dal_ready:
                    Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
                else:
                    lbin = LabelBinarizer()
                    Y_multi = lbin.fit_transform(y)
                    if Y_multi.shape[1] == 1:
                        Y_multi = np.hstack([1 - Y_multi, Y_multi])
            else:
                # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
                le = LabelEncoder()
                Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)

        if _dal_ready:
            w0 = np.zeros((classes.size, n_features + 1),
                          order='C',
                          dtype=X.dtype)
        else:
            w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                          order='F',
                          dtype=X.dtype)

    if coef is not None:
        # it must work both with and without the bias term
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            if _dal_ready:
                w0[-coef.size:] = \
                    np.roll(coef, 1, -1) if coef.size != n_features else coef
            else:
                w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1

            if coef.shape[0] != n_classes or \
                    coef.shape[1] not in (n_features, n_features + 1):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' %
                    (coef.shape[0], coef.shape[1], classes.size, n_features,
                     classes.size, n_features + 1))

            if _dal_ready:
                w0[:, -coef.shape[1]:] = \
                    np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
            else:
                if n_classes == 1:
                    w0[0, :coef.shape[1]] = -coef
                    w0[1, :coef.shape[1]] = coef
                else:
                    w0[:, :coef.shape[1]] = coef

    C_daal_multiplier = 1
    # commented out because this uses a Py3-only feature (nonlocal)
    #def _map_to_binary_logistic_regression():
    #    nonlocal C_daal_multiplier
    #    nonlocal w0
    #    C_daal_multiplier = 2
    #    w0 *= 2

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            if _dal_ready and classes.size == 2:
                w0 = w0[-1:, :]
            if sklearn_check_version('1.1'):
                w0 = w0.ravel(order="F")
            else:
                w0 = w0.ravel()
        target = Y_multi
        loss = None
        if sklearn_check_version('1.1'):
            loss = LinearModelLoss(
                base_loss=HalfMultinomialLoss(n_classes=classes.size),
                fit_intercept=fit_intercept,
            )
        if solver == 'lbfgs':
            if _dal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_and_grad
            else:
                if sklearn_check_version('1.1') and loss is not None:
                    func = loss.loss_gradient
                else:

                    def func(x, *args):
                        return _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            if _dal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                if sklearn_check_version('1.1') and loss is not None:
                    func = loss.loss
                    grad = loss.gradient
                    hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
                else:

                    def func(x, *args):
                        return _multinomial_loss(x, *args)[0]

                    def grad(x, *args):
                        return _multinomial_loss_grad(x, *args)[1]

                    hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            if _dal_ready:
                func = _daal4py_loss_and_grad
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
            else:
                if sklearn_check_version('1.1'):
                    loss = LinearModelLoss(base_loss=HalfBinomialLoss(),
                                           fit_intercept=fit_intercept)
                    func = loss.loss_gradient
                else:
                    func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            if _dal_ready:
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                if sklearn_check_version('1.1'):
                    loss = LinearModelLoss(base_loss=HalfBinomialLoss(),
                                           fit_intercept=fit_intercept)
                    func = loss.loss
                    grad = loss.gradient
                    hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
                else:
                    func = _logistic_loss

                    def grad(x, *args):
                        return _logistic_loss_and_grad(x, *args)[1]

                    hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}

    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            if _dal_ready:
                extra_args = daal_extra_args_func(classes.size,
                                                  w0,
                                                  X,
                                                  target,
                                                  0.,
                                                  1. /
                                                  (2 * C * C_daal_multiplier),
                                                  fit_intercept,
                                                  value=True,
                                                  gradient=True,
                                                  hessian=False)
            else:
                if sklearn_check_version('1.1'):
                    l2_reg_strength = 1.0 / C
                    extra_args = (X, target, sample_weight, l2_reg_strength,
                                  n_threads)
                else:
                    extra_args = (X, target, 1. / C, sample_weight)

            iprint = [-1, 50, 1, 100,
                      101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            opt_res = optimize.minimize(func,
                                        w0,
                                        method="L-BFGS-B",
                                        jac=True,
                                        args=extra_args,
                                        options={
                                            "iprint": iprint,
                                            "gtol": tol,
                                            "maxiter": max_iter
                                        })
            n_iter_i = _check_optimize_result(
                solver,
                opt_res,
                max_iter,
                extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
            w0, loss = opt_res.x, opt_res.fun
            if _dal_ready and C_daal_multiplier == 2:
                w0 /= 2
        elif solver == 'newton-cg':
            if _dal_ready:

                def make_ncg_funcs(f,
                                   value=False,
                                   gradient=False,
                                   hessian=False):
                    daal_penaltyL2 = 1. / (2 * C * C_daal_multiplier)
                    _obj_, X_, y_, n_samples = daal_extra_args_func(
                        classes.size,
                        w0,
                        X,
                        target,
                        0.,
                        daal_penaltyL2,
                        fit_intercept,
                        value=value,
                        gradient=gradient,
                        hessian=hessian)

                    def _func_(x, *args):
                        return f(x, _obj_, *args)

                    return _func_, (X_, y_, n_samples, daal_penaltyL2)

                loss_func, extra_args = make_ncg_funcs(func, value=True)
                grad_func, _ = make_ncg_funcs(grad, gradient=True)
                grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
                w0, n_iter_i = _newton_cg(grad_hess_func,
                                          loss_func,
                                          grad_func,
                                          w0,
                                          args=extra_args,
                                          maxiter=max_iter,
                                          tol=tol)
            else:
                if sklearn_check_version('1.1'):
                    l2_reg_strength = 1.0 / C
                    args = (X, target, sample_weight, l2_reg_strength,
                            n_threads)
                else:
                    args = (X, target, 1. / C, sample_weight)

                w0, n_iter_i = _newton_cg(hess,
                                          func,
                                          grad,
                                          w0,
                                          args=args,
                                          maxiter=max_iter,
                                          tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X,
                target,
                C,
                fit_intercept,
                intercept_scaling,
                None,
                penalty,
                dual,
                verbose,
                max_iter,
                tol,
                random_state,
                sample_weight=sample_weight,
            )
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()

        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(X.dtype, copy=False)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio

            w0, n_iter_i, warm_start_sag = sag_solver(
                X,
                target,
                sample_weight,
                loss,
                alpha,
                beta,
                max_iter,
                tol,
                verbose,
                random_state,
                False,
                max_squared_sum,
                warm_start_sag,
                is_saga=(solver == 'saga'))

        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            if _dal_ready:
                if classes.size == 2:
                    multi_w0 = w0[np.newaxis, :]
                else:
                    multi_w0 = np.reshape(w0, (classes.size, -1))
            else:
                # n_classes is needed by both branches below and by the binary
                # squeeze at the end (mirrors sklearn >= 1.1).
                n_classes = max(2, classes.size)
                if sklearn_check_version('1.1'):
                    if solver in ["lbfgs", "newton-cg"]:
                        multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
                    else:
                        multi_w0 = w0
                else:
                    multi_w0 = np.reshape(w0, (n_classes, -1))
                if n_classes == 2:
                    multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0.copy())
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i

    if _dal_ready:
        if fit_intercept:
            for i, ci in enumerate(coefs):
                coefs[i] = np.roll(ci, -1, -1)
        else:
            for i, ci in enumerate(coefs):
                coefs[i] = np.delete(ci, 0, axis=-1)

    _patching_status.write_log()

    return np.array(coefs), np.array(Cs), n_iter
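The docstring's main point, reusing the previous solution to speed up the fits along the grid of Cs, can also be illustrated with the public estimator alone. A rough sketch, assuming only scikit-learn's public LogisticRegression API (the private path function above does the warm starting internally for the lbfgs/newton-cg/sag/saga solvers):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
clf = LogisticRegression(solver="lbfgs", warm_start=True, max_iter=1000)

coefs = []
for C in np.logspace(-4, 4, 5):  # same default grid as an integer Cs above
    clf.set_params(C=C)
    clf.fit(X, y)                # starts from the previous coef_ instead of zeros
    coefs.append(clf.coef_.copy())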
Example No. 8
def test_loss_gradients_are_the_same(base_loss, fit_intercept, sample_weight,
                                     l2_reg_strength):
    """Test that loss and gradient are the same across different functions."""
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    X, y, coef = random_X_y_coef(linear_model_loss=loss,
                                 n_samples=10,
                                 n_features=5,
                                 seed=42)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    l1 = loss.loss(coef,
                   X,
                   y,
                   sample_weight=sample_weight,
                   l2_reg_strength=l2_reg_strength)
    g1 = loss.gradient(coef,
                       X,
                       y,
                       sample_weight=sample_weight,
                       l2_reg_strength=l2_reg_strength)
    l2, g2 = loss.loss_gradient(coef,
                                X,
                                y,
                                sample_weight=sample_weight,
                                l2_reg_strength=l2_reg_strength)
    g3, h3 = loss.gradient_hessian_product(coef,
                                           X,
                                           y,
                                           sample_weight=sample_weight,
                                           l2_reg_strength=l2_reg_strength)

    assert_allclose(l1, l2)
    assert_allclose(g1, g2)
    assert_allclose(g1, g3)

    # same for sparse X
    X = sparse.csr_matrix(X)
    l1_sp = loss.loss(coef,
                      X,
                      y,
                      sample_weight=sample_weight,
                      l2_reg_strength=l2_reg_strength)
    g1_sp = loss.gradient(coef,
                          X,
                          y,
                          sample_weight=sample_weight,
                          l2_reg_strength=l2_reg_strength)
    l2_sp, g2_sp = loss.loss_gradient(coef,
                                      X,
                                      y,
                                      sample_weight=sample_weight,
                                      l2_reg_strength=l2_reg_strength)
    g3_sp, h3_sp = loss.gradient_hessian_product(
        coef,
        X,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=l2_reg_strength)

    assert_allclose(l1, l1_sp)
    assert_allclose(l1, l2_sp)
    assert_allclose(g1, g1_sp)
    assert_allclose(g1, g2_sp)
    assert_allclose(g1, g3_sp)
    assert_allclose(h3(g1), h3_sp(g1_sp))
Example No. 9
def test_gradients_hessians_numerically(base_loss, fit_intercept,
                                        sample_weight, l2_reg_strength):
    """Test gradients and hessians with numerical derivatives.

    Gradient should equal the numerical derivatives of the loss function.
    Hessians should equal the numerical derivatives of gradients.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(linear_model_loss=loss,
                                 n_samples=n_samples,
                                 n_features=n_features,
                                 seed=42)
    coef = coef.ravel(order="F")  # this is important only for multinomial loss

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    # 1. Check gradients numerically
    eps = 1e-6
    g, hessp = loss.gradient_hessian_product(coef,
                                             X,
                                             y,
                                             sample_weight=sample_weight,
                                             l2_reg_strength=l2_reg_strength)
    # Use a trick to get central finite difference of accuracy 4 (five-point stencil)
    # https://en.wikipedia.org/wiki/Numerical_differentiation
    # https://en.wikipedia.org/wiki/Finite_difference_coefficient
    # approx_g1 = (f(x + eps) - f(x - eps)) / (2*eps)
    approx_g1 = optimize.approx_fprime(
        coef,
        lambda coef: loss.loss(
            coef - eps,
            X,
            y,
            sample_weight=sample_weight,
            l2_reg_strength=l2_reg_strength,
        ),
        2 * eps,
    )
    # approx_g2 = (f(x + 2*eps) - f(x - 2*eps)) / (4*eps)
    approx_g2 = optimize.approx_fprime(
        coef,
        lambda coef: loss.loss(
            coef - 2 * eps,
            X,
            y,
            sample_weight=sample_weight,
            l2_reg_strength=l2_reg_strength,
        ),
        4 * eps,
    )
    # Five-point stencil approximation
    # See: https://en.wikipedia.org/wiki/Five-point_stencil#1D_first_derivative
    approx_g = (4 * approx_g1 - approx_g2) / 3
    assert_allclose(g, approx_g, rtol=1e-2, atol=1e-8)

    # 2. Check hessp numerically along the second direction of the gradient
    vector = np.zeros_like(g)
    vector[1] = 1
    hess_col = hessp(vector)
    # Computation of the Hessian is particularly fragile to numerical errors when
    # doing simple finite differences. Here we compute the gradient along a path in
    # the direction of the vector and then use a least-squares regression to
    # estimate the slope.
    eps = 1e-3
    d_x = np.linspace(-eps, eps, 30)
    d_grad = np.array([
        loss.gradient(
            coef + t * vector,
            X,
            y,
            sample_weight=sample_weight,
            l2_reg_strength=l2_reg_strength,
        ) for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_allclose(approx_hess_col, hess_col, rtol=1e-3)
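The shifted-argument trick used with optimize.approx_fprime above (evaluating the loss at coef - eps turns SciPy's forward difference into a central one) is easier to see on a scalar function. A standalone sketch, assuming only NumPy/SciPy, checking the five-point stencil against the exact derivative of sin:

import numpy as np
from scipy import optimize

x = np.array([0.3])
eps = 1e-6
# (f(x + eps) - f(x - eps)) / (2 * eps)
approx_g1 = optimize.approx_fprime(x, lambda x: np.sin(x[0] - eps), 2 * eps)
# (f(x + 2*eps) - f(x - 2*eps)) / (4 * eps)
approx_g2 = optimize.approx_fprime(x, lambda x: np.sin(x[0] - 2 * eps), 4 * eps)
# five-point stencil combination cancels the leading O(eps**2) error term
approx_g = (4 * approx_g1 - approx_g2) / 3
np.testing.assert_allclose(approx_g, np.cos(x), rtol=1e-7)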