Example #1
def make_anchor_tabular(dirname: Optional[Path] = None) -> AnchorTabular:
    """Train an iris classifier and wrap it in an AnchorTabular explainer.

    Fits a one-vs-rest logistic regression on the iris dataset, builds an
    AnchorTabular explainer around its ``predict`` method, and fits the
    explainer's discretizer on the training data.

    Parameters
    ----------
    dirname
        If given, the fitted explainer is also saved to this directory.

    Returns
    -------
    AnchorTabular
        The fitted explainer.
    """
    iris = load_iris()

    # Simple baseline model; the explainer only needs its predict function.
    model = LogisticRegression(solver="liblinear", multi_class="ovr")
    model.fit(iris.data, iris.target)

    anchor = AnchorTabular(model.predict,
                           feature_names=iris.feature_names)
    # Discretize numeric features at the 25th/50th/75th percentiles.
    anchor.fit(iris.data, disc_perc=(25, 50, 75))

    if dirname is not None:
        anchor.save(dirname)
    return anchor
Example #2
import numpy as np
from sklearn.datasets import load_iris
from alibi.explainers import AnchorTabular

import requests

# Load the iris dataset; feature names are needed by the explainer below,
# and the raw data is used to fit the explainer's discretizer.
dataset = load_iris()
feature_names = dataset.feature_names
iris_data = dataset.data

# Seldon Core prediction endpoint for the deployed iris model.
# NOTE(review): assumes a local Seldon deployment is running on port 8003.
model_url = "http://localhost:8003/seldon/seldon/iris/api/v1.0/predictions"


def predict_fn(X):
    """Query the remote Seldon model and return its predictions as an array.

    Parameters
    ----------
    X : np.ndarray
        Batch of instances to classify, shape (n_samples, n_features).

    Returns
    -------
    np.ndarray
        Predictions decoded from the Seldon ndarray response.

    Raises
    ------
    requests.HTTPError
        If the prediction endpoint returns an error status.
    """
    # BUG FIX: the original built this payload from X but then posted a
    # hard-coded [[1, 2, 3, 4]], so every input produced the same prediction.
    payload = {"data": {"ndarray": X.tolist()}}
    r = requests.post(model_url, json=payload)
    # Fail loudly on HTTP errors instead of crashing on an unexpected body.
    r.raise_for_status()
    return np.array(r.json()["data"]["ndarray"])


# Build the explainer around the remote prediction function; it never sees
# the model itself, only predict_fn's outputs.
explainer = AnchorTabular(predict_fn, feature_names)
# Fit the discretizer on the training data (quartile bin edges).
explainer.fit(iris_data, disc_perc=(25, 50, 75))

# Persist the fitted explainer to disk for later deployment.
explainer.save("./explainer/")
Example #3
def make_anchor_tabular_income(
        dirname: Optional[Path] = None) -> AnchorTabular:
    """Train a random-forest adult-income classifier and wrap it in a fitted
    AnchorTabular explainer; optionally save the explainer to ``dirname``.

    Returns the fitted explainer.
    """
    # adapted from:
    # https://docs.seldon.io/projects/alibi/en/latest/examples/anchor_tabular_adult.html
    # Fixed seed so the permutation below (and hence the train/test split)
    # is reproducible.
    np.random.seed(0)

    # prepare data
    adult = fetch_adult()
    data = adult.data
    target = adult.target
    feature_names = adult.feature_names
    category_map = adult.category_map

    # Shuffle features and labels together by stacking the target as the
    # last column, then split back out.
    # NOTE(review): np.c_ coerces everything to a common dtype — confirm
    # downstream code tolerates the resulting dtype of `data`/`target`.
    data_perm = np.random.permutation(np.c_[data, target])
    data = data_perm[:, :-1]
    target = data_perm[:, -1]

    # build model
    # First 30000 rows train; the remainder (skipping row `idx`) tests.
    idx = 30000
    X_train, Y_train = data[:idx, :], target[:idx]
    X_test, Y_test = data[idx + 1:, :], target[idx + 1:]

    # Ordinal (numeric) features are every column not listed in the
    # categorical map.
    ordinal_features = [
        x for x in range(len(feature_names))
        if x not in list(category_map.keys())
    ]
    ordinal_transformer = Pipeline(steps=[
        ("imputer", SimpleImputer(strategy="median")),
        ("scaler", StandardScaler()),
    ])

    categorical_features = list(category_map.keys())
    # NOTE(review): median imputation on categorical columns looks odd —
    # presumably these features are already ordinal-encoded integers
    # (as in the alibi adult example); confirm.
    categorical_transformer = Pipeline(steps=[
        ("imputer", SimpleImputer(strategy="median")),
        ("onehot", OneHotEncoder(handle_unknown="ignore")),
    ])

    # Route numeric vs. categorical columns through their own pipelines.
    preprocessor = ColumnTransformer(transformers=[
        ("num", ordinal_transformer, ordinal_features),
        ("cat", categorical_transformer, categorical_features),
    ])

    clf = RandomForestClassifier(n_estimators=50)

    # Full pipeline: preprocessing + classifier, so the explainer can call
    # predict on raw (unpreprocessed) rows.
    model_pipeline = Pipeline(steps=[
        ("preprocess", preprocessor),
        ("classifier", clf),
    ])

    model_pipeline.fit(X_train, Y_train)

    # The explainer needs the categorical map to perturb categorical
    # features sensibly; seed fixes the anchor search's randomness.
    explainer = AnchorTabular(model_pipeline.predict,
                              feature_names,
                              categorical_names=category_map,
                              seed=1)

    # Fit the discretizer on the training data (quartile bin edges).
    explainer.fit(X_train, disc_perc=[25, 50, 75])

    if dirname is not None:
        explainer.save(dirname)
    return explainer