Example 1
import numpy as np
import pytest

from eli5.lime import TextExplainer


def _apply_to_list(func):
    # Lift a function that scores a single document to one that scores a
    # batch of documents, as TextExplainer.fit expects (helper analogous to
    # the one in eli5's test suite).
    def wrapper(docs):
        return np.array([func(doc) for doc in docs])
    return wrapper


def test_text_explainer_show_methods():
    pytest.importorskip('IPython')
    from IPython.display import HTML

    text = "Hello, world!"

    @_apply_to_list
    def predict_proba(doc):
        return [0.0, 1.0] if 'lo' in doc else [1.0, 0.0]

    te = TextExplainer()
    te.fit(text, predict_proba)

    pred_expl = te.show_prediction()
    assert isinstance(pred_expl, HTML)
    assert 'lo' in pred_expl.data

    weight_expl = te.show_weights()
    assert isinstance(weight_expl, HTML)
    assert 'lo' in weight_expl.data
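
For context, TextExplainer.fit calls the probability function on whole batches of generated texts, which is why the per-document function above is lifted to a batch API by the decorator. A quick sanity check of the wrapped function (hypothetical usage, reusing the helper defined above):

probs = predict_proba(["Hello", "hi there"])
# probs -> array([[0., 1.], [1., 0.]]): one row of class probabilities
# per input document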
Example 2
# -*- coding: utf-8 -*-
"""
__title__ = 'eli5'
__author__ = 'JieYuan'
__mtime__ = '2018/8/21'
"""
from eli5.lime import TextExplainer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

X = [
    "The dimension of the input documents is reduced to 100, and then a kernel SVM is used to classify the documents.",
    "This is what the pipeline returns for a document - it is pretty sure the first message in test data belongs to sci.med:"
]

y = [0, 1]

pipeline = make_pipeline(TfidfVectorizer(), LogisticRegression())
pipeline.fit(X, y)  # fit the black-box pipeline before explaining it

te = TextExplainer(random_state=42)
te.fit(X[0], pipeline.predict_proba)
te.show_prediction()
te.show_weights()

# eli5.show_prediction can also be used directly, without going through TextExplainer.
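
A minimal sketch of that top-level call, assuming the pipeline fitted above (eli5.show_prediction takes the estimator, the document, and the vectorizer via the vec parameter):

import eli5

eli5.show_prediction(
    pipeline.named_steps['logisticregression'],
    X[0],
    vec=pipeline.named_steps['tfidfvectorizer'],
)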
Example 3
# exp is a TextExplainer fitted earlier in the notebook
exp.show_prediction()

# In[381]:

exp.show_prediction(target_names=target_names)

# In[382]:

exp.metrics_

# - ‘score’ is an accuracy score weighted by the cosine distance between each generated sample and the original document (i.e. texts closer to the example count more). Accuracy shows how good the ‘top 1’ predictions are.
# - ‘mean_KL_divergence’ is the mean Kullback–Leibler divergence over all target classes, also weighted by distance. KL divergence shows how well the probabilities are approximated; 0.0 means a perfect match.
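
# As a minimal sketch of the idea behind ‘mean_KL_divergence’ (not eli5's
# actual implementation): take the KL divergence between the black-box
# probabilities p and the white-box approximation q for each generated
# sample, then average with weights reflecting similarity to the original
# document.

import numpy as np

def weighted_mean_kl(p, q, sample_weight, eps=1e-10):
    # KL(p || q) per sample, then a distance-weighted mean
    kl = np.sum(p * np.log((p + eps) / (q + eps)), axis=1)
    return np.average(kl, weights=sample_weight)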

# In[46]:

exp.show_weights()

# In[47]:

# Check the vectorizer used internally by the TextExplainer
exp.vec_

# In[48]:

# Check the classifier used internally by the TextExplainer
exp.clf_
