Example 1
def get_decision_path_explanation(estimator, doc, vec, vectorized,
                                  x, feature_names,
                                  feature_filter, feature_re, top,
                                  original_display_names,
                                  target_names, targets, top_targets,
                                  is_regression, is_multiclass, proba,
                                  get_score_weights):
    # type: (...) -> Explanation

    display_names = get_target_display_names(
        original_display_names, target_names, targets, top_targets, proba)
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def get_top_features(weights, scale=1.0):
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    explanation = Explanation(
        estimator=repr(estimator),
        method='decision paths',
        description={
            (False, False): DESCRIPTION_CLF_BINARY,
            (False, True): DESCRIPTION_CLF_MULTICLASS,
            (True, False): DESCRIPTION_REGRESSION,
        }[is_regression, is_multiclass],
        is_regression=is_regression,
        targets=[],
    )
    assert explanation.targets is not None

    if is_multiclass:
        for label_id, label in display_names:
            score, all_feature_weights = get_score_weights(label_id)
            target_expl = TargetExplanation(
                target=label,
                feature_weights=get_top_features(all_feature_weights),
                score=score,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            explanation.targets.append(target_expl)
    else:
        score, all_feature_weights = get_score_weights(0)
        if is_regression:
            target, scale, label_id = display_names[-1][1], 1.0, 1
        else:
            target, scale, label_id = get_binary_target_scale_label_id(
                score, display_names, proba)

        target_expl = TargetExplanation(
            target=target,
            feature_weights=get_top_features(all_feature_weights, scale),
            score=score,
            proba=proba[label_id] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        explanation.targets.append(target_expl)

    return explanation
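
The ``get_score_weights`` callback is the extension point here: given a target id it must return the raw score and a per-feature weight vector aligned with the feature names. A minimal sketch of that contract, with a hypothetical helper name and toy numbers:

import numpy as np

def make_score_weights(scores, weight_matrix):
    # scores: shape (n_targets,); weight_matrix: shape (n_features, n_targets),
    # matching the feature_weights[:, label_id] indexing used by the callers
    # in the tree explainers below.
    def get_score_weights(label_id):
        return scores[label_id], weight_matrix[:, label_id]
    return get_score_weights

cb = make_score_weights(np.array([0.2, 0.8]),
                        np.array([[0.1, 0.3],
                                  [0.0, 0.5],
                                  [0.1, 0.0]]))
score, weights = cb(1)  # (0.8, array([0.3, 0.5, 0. ]))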
Example 2
def explain_weights_sklearn_crfsuite(crf,
                                     top=20,
                                     target_names=None,
                                     targets=None,
                                     feature_re=None,
                                     feature_filter=None):
    """ Explain sklearn_crfsuite.CRF weights.

    See :func:`eli5.explain_weights` for description of
    ``top``, ``target_names``, ``targets``,
    ``feature_re`` and ``feature_filter`` parameters.
    """
    feature_names = np.array(crf.attributes_)
    state_coef = crf_state_coef(crf).todense().A
    transition_coef = crf_transition_coef(crf)

    if feature_filter is not None or feature_re is not None:
        state_feature_names, flt_indices = (
            FeatureNames(feature_names).handle_filter(feature_filter, feature_re))
        state_feature_names = np.array(state_feature_names.feature_names)
        state_coef = state_coef[:, flt_indices]
    else:
        state_feature_names = feature_names

    def _features(label_id):
        return get_top_features(state_feature_names, state_coef[label_id], top)

    if targets is None:
        targets = sorted_for_ner(crf.classes_)

    display_names = get_target_display_names(crf.classes_, target_names,
                                             targets)
    indices, names = zip(*display_names)
    transition_coef = filter_transition_coefs(transition_coef, indices)

    return Explanation(
        targets=[
            TargetExplanation(
                target=label,
                feature_weights=_features(label_id)
            )
            for label_id, label in zip(indices, names)
        ],
        transition_features=TransitionFeatureWeights(
            class_names=names,
            coef=transition_coef,
        ),
        estimator=repr(crf),
        method='CRF',
    )
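
A minimal usage sketch for the CRF explainer, assuming ``sklearn_crfsuite`` and ``eli5`` are installed; the token-level feature dicts and labels below are toy data:

import eli5
import sklearn_crfsuite

X_train = [[{'word': 'Paris'}, {'word': 'is'}], [{'word': 'hello'}]]
y_train = [['B-LOC', 'O'], ['O']]

crf = sklearn_crfsuite.CRF(algorithm='lbfgs', max_iterations=20)
crf.fit(X_train, y_train)

# eli5.explain_weights dispatches to explain_weights_sklearn_crfsuite
# for CRF estimators.
expl = eli5.explain_weights(crf, top=10)
print(eli5.format_as_text(expl))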
Example 3
def get_decision_path_explanation(estimator, doc, vec, vectorized,
                                  original_display_names,
                                  target_names, targets, top_targets,
                                  is_regression, is_multiclass, proba,
                                  get_score_feature_weights):

    display_names = get_target_display_names(
        original_display_names, target_names, targets, top_targets, proba)

    explanation = Explanation(
        estimator=repr(estimator),
        method='decision paths',
        description={
            (False, False): DESCRIPTION_CLF_BINARY,
            (False, True): DESCRIPTION_CLF_MULTICLASS,
            (True, False): DESCRIPTION_REGRESSION,
        }[is_regression, is_multiclass],
        is_regression=is_regression,
        targets=[],
    )

    if is_multiclass:
        for label_id, label in display_names:
            score, feature_weights = get_score_feature_weights(label_id)
            target_expl = TargetExplanation(
                target=label,
                feature_weights=feature_weights,
                score=score,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            explanation.targets.append(target_expl)
    else:
        score, feature_weights = get_score_feature_weights(0)
        target_expl = TargetExplanation(
            target=display_names[-1][1],
            feature_weights=feature_weights,
            score=score,
            proba=proba[1] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        explanation.targets.append(target_expl)

    return explanation
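
This variant differs from Example 1 in that the callback returns feature weights that are already filtered and truncated, rather than raw vectors. A hedged sketch of a matching callback, assuming a module-level ``get_top_features`` helper like the one used elsewhere in this file (names hypothetical):

def make_callback(scores, weight_matrix, feature_names, top):
    def get_score_feature_weights(label_id):
        weights = weight_matrix[:, label_id]
        # return the score plus ready-to-display top features
        return scores[label_id], get_top_features(feature_names, weights, top)
    return get_score_feature_weights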
Example 4
def explain_prediction_tree_regressor(reg,
                                      doc,
                                      vec=None,
                                      top=None,
                                      top_targets=None,
                                      target_names=None,
                                      targets=None,
                                      feature_names=None,
                                      feature_re=None,
                                      feature_filter=None,
                                      vectorized=False):
    """ Explain prediction of a tree regressor.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the regressor ``reg``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    regressor. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.

    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble (or a single tree for DecisionTreeRegressor).
    Each node of the tree has an output score, and the contribution of a feature
    on the decision path is how much the score changes from parent to child.
    Weights of all features sum to the output score of the estimator.
    """
    vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized)
    if feature_names.bias_name is None:
        # Tree estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'

    score, = reg.predict(X)
    num_targets = getattr(reg, 'n_outputs_', 1)
    is_multitarget = num_targets > 1
    feature_weights = _trees_feature_weights(reg, X, feature_names,
                                             num_targets)
    x = get_X0(add_intercept(X))
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def _weights(label_id, scale=1.0):
        weights = feature_weights[:, label_id]
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    res = Explanation(
        estimator=repr(reg),
        method='decision path',
        description=(DESCRIPTION_TREE_REG_MULTITARGET
                     if is_multitarget else DESCRIPTION_TREE_REG),
        targets=[],
        is_regression=True,
    )
    assert res.targets is not None

    names = get_default_target_names(reg, num_targets=num_targets)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)

    if is_multitarget:
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        target_expl = TargetExplanation(
            target=display_names[0][1],
            feature_weights=_weights(0),
            score=score,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
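
A usage sketch for the tree-regressor explainer through the public API, assuming scikit-learn and eli5 (the dataset is a standard toy one):

import eli5
from sklearn.datasets import load_diabetes
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeRegressor(max_depth=3).fit(X, y)

# doc is a single unvectorized feature vector; eli5.explain_prediction
# dispatches to explain_prediction_tree_regressor for tree regressors.
expl = eli5.explain_prediction(reg, X[0], top=5)
print(eli5.format_as_text(expl))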
Example 5
def explain_prediction_tree_classifier(clf,
                                       doc,
                                       vec=None,
                                       top=None,
                                       top_targets=None,
                                       target_names=None,
                                       targets=None,
                                       feature_names=None,
                                       feature_re=None,
                                       feature_filter=None,
                                       vectorized=False):
    """ Explain prediction of a tree classifier.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    classifier. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.

    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble (or a single tree for DecisionTreeClassifier).
    Each node of the tree has an output score, and the contribution of a feature
    on the decision path is how much the score changes from parent to child.
    Weights of all features sum to the output score or proba of the estimator.
    """
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized)
    if feature_names.bias_name is None:
        # Tree estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'

    proba = predict_proba(clf, X)
    if hasattr(clf, 'decision_function'):
        score, = clf.decision_function(X)
    else:
        score = None

    is_multiclass = clf.n_classes_ > 2
    feature_weights = _trees_feature_weights(clf, X, feature_names,
                                             clf.n_classes_)
    x = get_X0(add_intercept(X))
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def _weights(label_id, scale=1.0):
        weights = feature_weights[:, label_id]
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    res = Explanation(
        estimator=repr(clf),
        method='decision path',
        targets=[],
        description=(DESCRIPTION_TREE_CLF_MULTICLASS
                     if is_multiclass else DESCRIPTION_TREE_CLF_BINARY),
    )
    assert res.targets is not None

    display_names = get_target_display_names(
        clf.classes_,
        target_names,
        targets,
        top_targets,
        score=score if score is not None else proba)

    if is_multiclass:
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id] if score is not None else None,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        target, scale, label_id = get_binary_target_scale_label_id(
            score, display_names, proba)
        target_expl = TargetExplanation(
            target=target,
            feature_weights=_weights(label_id, scale=scale),
            score=score if score is not None else None,
            proba=proba[label_id] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
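
A similar sketch for the classifier variant; iris has three classes, so the multiclass branch above is exercised (assumes scikit-learn and eli5):

import eli5
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(n_estimators=50).fit(X, y)

# RandomForestClassifier has no decision_function, so score is None
# and only probabilities are reported per target.
expl = eli5.explain_prediction(clf, X[0], top=5, top_targets=2)
print(eli5.format_as_text(expl))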
Example 6
def explain_prediction_linear_regressor(reg,
                                        doc,
                                        vec=None,
                                        top=None,
                                        top_targets=None,
                                        target_names=None,
                                        targets=None,
                                        feature_names=None,
                                        feature_re=None,
                                        feature_filter=None,
                                        vectorized=False):
    """
    Explain prediction of a linear regressor.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the regressor ``reg``;
    you can pass it instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    regressor ``reg``. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.
    """
    if isinstance(reg, (SVR, NuSVR)) and reg.kernel != 'linear':
        return explain_prediction_sklearn_not_supported(reg, doc)

    vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)

    score, = reg.predict(X)

    if has_intercept(reg):
        X = add_intercept(X)
    x = get_X0(X)

    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    res = Explanation(
        estimator=repr(reg),
        method='linear model',
        targets=[],
        is_regression=True,
    )
    assert res.targets is not None

    _weights = _linear_weights(reg, x, top, feature_names, flt_indices)
    names = get_default_target_names(reg)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)

    if is_multitarget_regressor(reg):
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        target_expl = TargetExplanation(
            target=display_names[0][1],
            feature_weights=_weights(0),
            score=score,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
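
A usage sketch for the linear-regressor explainer; Ridge exposes ``coef_`` and ``intercept_``, so ``has_intercept(reg)`` holds and a ``<BIAS>`` feature appears in the output (assumes scikit-learn and eli5):

import eli5
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge

X, y = load_diabetes(return_X_y=True)
reg = Ridge().fit(X, y)

expl = eli5.explain_prediction(reg, X[0], top=5)
print(eli5.format_as_text(expl))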
Example 7
def explain_prediction_linear_classifier(
    clf,
    doc,
    vec=None,
    top=None,
    top_targets=None,
    target_names=None,
    targets=None,
    feature_names=None,
    feature_re=None,
    feature_filter=None,
    vectorized=False,
):
    """
    Explain prediction of a linear classifier.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    classifier. Set it to True if you're passing ``vec``, but ``doc``
    is already vectorized.
    """
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)

    proba = predict_proba(clf, X)
    score, = clf.decision_function(X)

    if has_intercept(clf):
        X = add_intercept(X)
    x = get_X0(X)

    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    res = Explanation(
        estimator=repr(clf),
        method='linear model',
        targets=[],
    )
    assert res.targets is not None

    _weights = _linear_weights(clf, x, top, feature_names, flt_indices)
    classes = getattr(clf, "classes_", ["-1", "1"])  # OneClassSVM support
    display_names = get_target_display_names(classes, target_names, targets,
                                             top_targets, score)

    if is_multiclass_classifier(clf):
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        if len(display_names) == 1:  # target is passed explicitly
            label_id, target = display_names[0]
        else:
            label_id = 1 if score >= 0 else 0
            target = display_names[label_id][1]
        scale = -1 if label_id == 0 else 1

        target_expl = TargetExplanation(
            target=target,
            feature_weights=_weights(0, scale=scale),
            score=score,
            proba=proba[label_id] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
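
A sketch showing the ``vec``/``vectorized`` parameters with a text pipeline: ``doc`` is a raw string and ``vec.transform([doc])`` is applied internally because ``vectorized`` keeps its default of False (toy corpus; assumes scikit-learn and eli5):

import eli5
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

docs = ['good movie', 'bad movie', 'great film', 'awful film']
labels = [1, 0, 1, 0]

vec = CountVectorizer()
clf = LogisticRegression().fit(vec.fit_transform(docs), labels)

# vec is passed instead of feature_names; per-word weights are also
# attached to the doc via add_weighted_spans.
expl = eli5.explain_prediction(clf, 'a good film', vec=vec, top=5)
print(eli5.format_as_text(expl))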
Example 8
def explain_prediction_xgboost(
        xgb, doc,
        vec=None,
        top=None,
        top_targets=None,
        target_names=None,
        targets=None,
        feature_names=None,
        feature_re=None,
        feature_filter=None,
        vectorized=False,
        ):
    """ Return an explanation of XGBoost prediction (via scikit-learn wrapper
    XGBClassifier or XGBRegressor) as feature weights.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the estimator ``xgb``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    estimator. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.

    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble.
    Each leaf has an output score, and expected scores can also be assigned
    to parent nodes.
    The contribution of one feature on the decision path is how much the
    expected score changes from parent to child.
    Weights of all features sum to the output score of the estimator.
    """
    num_features = len(xgb.booster().feature_names)
    vec, feature_names = handle_vec(
        xgb, doc, vec, vectorized, feature_names, num_features=num_features)
    if feature_names.bias_name is None:
        # XGBoost estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'

    X = get_X(doc, vec, vectorized=vectorized)
    if sp.issparse(X):
        # Work around XGBoost issue:
        # https://github.com/dmlc/xgboost/issues/1238#issuecomment-243872543
        X = X.tocsc()

    proba = predict_proba(xgb, X)
    scores_weights = _prediction_feature_weights(xgb, X, feature_names)

    x, = add_intercept(X)
    x = _missing_values_set_to_nan(x, xgb.missing, sparse_missing=True)
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    is_multiclass = _xgb_n_targets(xgb) > 1
    is_regression = isinstance(xgb, XGBRegressor)
    names = xgb.classes_ if not is_regression else ['y']
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, proba)

    res = Explanation(
        estimator=repr(xgb),
        method='decision paths',
        description={
            (False, False): DESCRIPTION_CLF_BINARY,
            (False, True): DESCRIPTION_CLF_MULTICLASS,
            (True, False): DESCRIPTION_REGRESSION,
        }[is_regression, is_multiclass],
        is_regression=is_regression,
        targets=[],
    )

    def get_score_feature_weights(_label_id):
        _score, _feature_weights = scores_weights[_label_id]
        _x = x
        if flt_indices is not None:
            _x = mask(_x, flt_indices)
            _feature_weights = mask(_feature_weights, flt_indices)
        return _score, get_top_features(
            feature_names, _feature_weights, top, _x)

    if is_multiclass:
        for label_id, label in display_names:
            score, feature_weights = get_score_feature_weights(label_id)
            target_expl = TargetExplanation(
                target=label,
                feature_weights=feature_weights,
                score=score,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        score, feature_weights = get_score_feature_weights(0)
        target_expl = TargetExplanation(
            target=display_names[-1][1],
            feature_weights=feature_weights,
            score=score,
            proba=proba[1] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
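
A hypothetical sketch for the XGBoost explainer, assuming the ``xgboost`` package with its scikit-learn wrapper is installed alongside eli5:

import eli5
from sklearn.datasets import load_iris
from xgboost import XGBClassifier

X, y = load_iris(return_X_y=True)
xgb = XGBClassifier(n_estimators=20).fit(X, y)

expl = eli5.explain_prediction(xgb, X[0], top=5)
print(eli5.format_as_text(expl))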
Example 9
def explain_linear_regressor_weights(
    reg,
    vec=None,
    top=_TOP,
    target_names=None,
    targets=None,
    feature_names=None,
    coef_scale=None,
    feature_re=None,
    feature_filter=None,
):
    """
    Return an explanation of a linear regressor's weights.

    See :func:`eli5.explain_weights` for description of
    ``top``, ``target_names``, ``targets``, ``feature_names``,
    ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the regressor ``reg``; you can
    pass it instead of ``feature_names``.

    ``coef_scale`` is a 1D np.ndarray with a scaling coefficient
    for each feature; coef[i] = coef[i] * coef_scale[i] if
    coef_scale[i] is not nan. Use it if you want to scale coefficients
    before displaying them, to take input feature sign or scale into account.
    """
    feature_names, coef_scale = handle_hashing_vec(vec, feature_names,
                                                   coef_scale)
    feature_names = get_feature_names(reg, vec, feature_names=feature_names)
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re)
    _extra_caveats = "\n" + HASHING_CAVEATS if is_invhashing(vec) else ''

    def _features(target_id):
        coef = get_coef(reg, target_id, scale=coef_scale)
        if flt_indices is not None:
            coef = coef[flt_indices]
        return get_top_features(feature_names, coef, top)

    display_names = get_target_display_names(get_default_target_names(reg),
                                             target_names, targets)
    if is_multitarget_regressor(reg):
        return Explanation(
            targets=[
                TargetExplanation(target=target_name,
                                  feature_weights=_features(target_id))
                for target_id, target_name in display_names
            ],
            description=DESCRIPTION_REGRESSION_MULTITARGET + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
    else:
        return Explanation(
            targets=[
                TargetExplanation(
                    target=display_names[0][1],
                    feature_weights=_features(0),
                )
            ],
            description=DESCRIPTION_REGRESSION + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
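
A usage sketch via the public entry point (assumes scikit-learn and eli5):

import eli5
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Lasso

X, y = load_diabetes(return_X_y=True)
reg = Lasso(alpha=0.1).fit(X, y)

# eli5.explain_weights dispatches to explain_linear_regressor_weights
# for linear regressors.
expl = eli5.explain_weights(reg, top=10)
print(eli5.format_as_text(expl))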
Example 10
def explain_linear_classifier_weights(
    clf,
    vec=None,
    top=_TOP,
    target_names=None,
    targets=None,
    feature_names=None,
    coef_scale=None,
    feature_re=None,
    feature_filter=None,
):
    """
    Return an explanation of a linear classifier's weights.

    See :func:`eli5.explain_weights` for description of
    ``top``, ``target_names``, ``targets``, ``feature_names``,
    ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.

    ``coef_scale`` is a 1D np.ndarray with a scaling coefficient
    for each feature; coef[i] = coef[i] * coef_scale[i] if
    coef_scale[i] is not nan. Use it if you want to scale coefficients
    before displaying them, to take input feature sign or scale into account.
    """
    feature_names, coef_scale = handle_hashing_vec(vec, feature_names,
                                                   coef_scale)
    feature_names = get_feature_names(clf, vec, feature_names=feature_names)
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re)

    _extra_caveats = "\n" + HASHING_CAVEATS if is_invhashing(vec) else ''

    def _features(label_id):
        coef = get_coef(clf, label_id, scale=coef_scale)
        if flt_indices is not None:
            coef = coef[flt_indices]
        return get_top_features(feature_names, coef, top)

    display_names = get_target_display_names(clf.classes_, target_names,
                                             targets)
    if is_multiclass_classifier(clf):
        return Explanation(
            targets=[
                TargetExplanation(target=label,
                                  feature_weights=_features(label_id))
                for label_id, label in display_names
            ],
            description=DESCRIPTION_CLF_MULTICLASS + _extra_caveats,
            estimator=repr(clf),
            method='linear model',
        )
    else:
        # for binary classifiers scikit-learn stores a single coefficient
        # vector, which corresponds to clf.classes_[1].
        return Explanation(
            targets=[
                TargetExplanation(
                    target=display_names[1][1],
                    feature_weights=_features(0),
                )
            ],
            description=DESCRIPTION_CLF_BINARY + _extra_caveats,
            estimator=repr(clf),
            method='linear model',
        )
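
A sketch exercising the binary branch above: a binary LogisticRegression stores a single coefficient vector, reported for ``clf.classes_[1]`` (assumes scikit-learn and eli5):

import eli5
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression

X, y = load_breast_cancer(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)

expl = eli5.explain_weights(clf, top=10)
print(eli5.format_as_text(expl))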