Example #1
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from string import ascii_lowercase

from alepython import ale_plot


def test_ale_plot():
    """Test that proper errors are raised."""
    with pytest.raises(ValueError, match=r".*'model'.*'predictor'.*"):
        ale_plot(model=None, train_set=pd.DataFrame([1]), features=[0])

    with pytest.raises(ValueError, match=r"'3' 'features'.*"):
        ale_plot(model=SimpleModel(),
                 train_set=pd.DataFrame([1]),
                 features=list(range(3)))

    with pytest.raises(ValueError, match=r"'0' 'features'.*"):
        ale_plot(model=SimpleModel(), train_set=pd.DataFrame([1]), features=[])

    with pytest.raises(NotImplementedError,
                       match="'features_classes' is not implemented yet."):
        ale_plot(
            model=SimpleModel(),
            train_set=pd.DataFrame([1]),
            features=[0],
            features_classes=["a"],
        )

    with pytest.raises(ValueError,
                       match=r"1 feature.*but 'bins' was not an integer."):
        ale_plot(
            model=SimpleModel(),
            train_set=pd.DataFrame([1]),
            features=[0],
            bins=1.0,
        )
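
# The tests reference helpers that are not shown in this excerpt. Below is a
# minimal sketch of what they might look like; the names `SimpleModel`,
# `simple_predictor`, and `assert_n_created_figures` match the tests, but
# these particular implementations are assumptions, not the originals.
from contextlib import contextmanager


class SimpleModel:
    """Toy model exposing the `predict` method that `ale_plot` expects."""

    def predict(self, X):
        # Deterministic, shape-preserving prediction: sum across columns.
        return np.asarray(X).sum(axis=1)


def simple_predictor(X):
    """Module-level predictor function with the same behaviour."""
    return np.asarray(X).sum(axis=1)


@contextmanager
def assert_n_created_figures(n=1):
    """Assert that exactly `n` new matplotlib figures are created."""
    initial = len(plt.get_fignums())
    yield
    created = len(plt.get_fignums()) - initial
    assert created == n, f"Expected {n} new figure(s), got {created}."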


def test_model(features, columns):
    """Given a model with a predict method, a plot should be created."""
    plt.ion()  # Prevent plt.show() from blocking.
    np.random.seed(1)
    train_set = pd.DataFrame(np.random.random((100, len(columns))),
                             columns=columns)
    with assert_n_created_figures():
        ale_plot(SimpleModel(), train_set, features)
    # Clean up the created figure.
    plt.close()


def test_monte_carlo(features, columns):
    """Given a predictor and `monte_carlo=True`, a plot should be created."""
    plt.ion()  # Prevent plt.show() from blocking.
    np.random.seed(1)
    train_set = pd.DataFrame(np.random.random((100, len(columns))),
                             columns=columns)
    with assert_n_created_figures():
        ale_plot(
            model=None,
            train_set=train_set,
            features=features,
            predictor=simple_predictor,
            monte_carlo=True,
        )
    # Clean up the created figure.
    plt.close()


def test_predictor(features, columns):
    """Given a predictor function, a plot should be created."""
    plt.ion()  # Prevent plt.show() from blocking.
    np.random.seed(1)
    train_set = pd.DataFrame(np.random.random((100, len(columns))),
                             columns=columns)
    with assert_n_created_figures():
        ale_plot(
            model=None,
            train_set=train_set,
            features=features,
            predictor=simple_predictor,
        )
    # Clean up the created figure.
    plt.close()
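
# `test_model`, `test_monte_carlo`, and `test_predictor` all take `features`
# and `columns` arguments, so the original suite presumably supplies them via
# pytest parametrization. A hedged sketch of what that could look like; the
# feature/column combinations below are illustrative assumptions:
@pytest.mark.parametrize(
    "features, columns",
    [
        (["a"], ["a", "b"]),           # first-order ALE: one feature
        (["a", "b"], ["a", "b", "c"]), # second-order ALE: two features
    ],
)
def test_model_parametrized(features, columns):
    test_model(features, columns)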


def test_df_column_features():
    """Test the handling of the `features` argument.

    No matter the type of the `features` iterable, `ale_plot` should be able to select
    the right columns.

    """
    plt.ion()  # Prevent plt.show() from blocking.
    n_col = 3
    np.random.seed(1)
    train_set = pd.DataFrame(np.random.random((100, n_col)),
                             columns=list(ascii_lowercase[:n_col]))
    with assert_n_created_figures():
        ale_plot(SimpleModel(), train_set, train_set.columns[:1])
    # Clean up the created figure.
    plt.close()
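
# The SHAP plots below assume `shap_values` and `X_train` already exist. A
# minimal sketch of how they might be produced follows; using
# `shap.TreeExplainer` is an assumption that `classifier` (the model also
# passed to `ale_plot` below) is tree-based.
import shap

explainer = shap.TreeExplainer(classifier)
shap_values = explainer.shap_values(X_train)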
# Beeswarm summary of per-sample SHAP values for each feature.
shap.summary_plot(shap_values, X_train)

# Bar chart of mean absolute SHAP values (global feature importance).
shap.summary_plot(shap_values, X_train, plot_type="bar")

# LIME explainability lives in a separate .py file, since in our experiments
# LIME worked better with LightGBM than with XGBoost.

# Applying ALE (accumulated local effects) for explainability.
import matplotlib
from alepython import ale_plot

matplotlib.rc("figure", figsize=(9, 6))
# First-order ALE plot for a single feature. With `monte_carlo=True`,
# `ale_plot` overlays 100 Monte Carlo replicas, each computed on a random
# 60% subsample of X_test, to show the stability of the main ALE curve.
ale_plot(
    classifier,
    X_test,
    X_test.columns[:1],
    bins=20,
    monte_carlo=True,
    monte_carlo_rep=100,
    monte_carlo_ratio=0.6,
)

matplotlib.rc("figure", figsize=(9, 6))
ale_plot(classifier, X_test, X_test.columns[1:3], bins=10)
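
# Note: passing two features, as above, produces a second-order ALE plot,
# which shows the interaction effect of the feature pair beyond their
# individual first-order effects.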

# Applying PDP-style info plots for explainability of a single feature.
from pdpbox import info_plots

# Binary feature 'Bump'
## 1.1 Distribution of the target across values of 'Bump'.
fig, axes, summary_df = info_plots.target_plot(df=train_df,
                                               feature='Bump',
                                               feature_name='Bump',