Example #1
def train_explainer(artifacts_folder: str, data: AdultData, model: RandomForestClassifier) -> AnchorTabular:
    # Expose the trained model to the explainer through a plain prediction function.
    def predict_fn(x):
        return model.predict(x)

    explainer = AnchorTabular(predict_fn, data.feature_names, categorical_names=data.category_map, seed=1)
    explainer.fit(data.X_train, disc_perc=(25, 50, 75))
    with open(f"{artifacts_folder}/{EXPLAINER_FOLDER}/explainer.dill", "wb") as f:
        # Detach the prediction functions before serializing; they must be
        # reattached when the explainer is loaded back.
        explainer.predictor = None
        explainer.samplers[0].predictor = None
        dill.dump(explainer, f)
    return explainer
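
A possible loading counterpart (not in the original): the predictors are set to None before dumping, so they must be reattached after deserialization, here via alibi's reset_predictor; the helper name load_explainer is illustrative.

def load_explainer(artifacts_folder: str, model: RandomForestClassifier) -> AnchorTabular:
    # Load the dill-serialized explainer and reattach the prediction function
    # that train_explainer detached before dumping.
    with open(f"{artifacts_folder}/{EXPLAINER_FOLDER}/explainer.dill", "rb") as f:
        explainer = dill.load(f)
    explainer.reset_predictor(lambda x: model.predict(x))
    return explainer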
    def retrain_classifier_final(self, args, nn_model_ref):
        nn_model_ref.epochs = args.num_epch_2
        nn_model_ref.batch_size_2 = args.batch_size_2
        # Freeze the trained network and reuse its intermediate-layer activations as features.
        nn_model_ref.net.freeze()
        X_train_proba_feat, X_eval_proba_feat = nn_model_ref.all_intermediaire, nn_model_ref.all_intermediaire_val
        Y_train_proba = nn_model_ref.Y_train_nn_binaire
        Y_eval_proba = nn_model_ref.Y_val_nn_binaire
        print("START RETRAIN LINEAR NN GOHR ")
        print()
        """net_retrain, h = train_speck_distinguisher(args, X_train_proba_feat.shape[1], X_train_proba_feat,
                                                   Y_train_proba, X_eval_proba_feat, Y_eval_proba,
                                                   bs=args.batch_size_2,
                                                   epoch=args.num_epch_2, name_ici="retrain_nn_gohr",
                                                   wdir=self.path_save_model)"""

        from alibi.explainers import AnchorTabular
        #from alibi.explainers import AnchorImage
        from sklearn.ensemble import RandomForestClassifier

        # Fit a random forest on the frozen network's intermediate features and
        # explain its predictions with tabular anchors.
        clf = RandomForestClassifier(n_estimators=50)
        clf.fit(X_train_proba_feat, Y_train_proba)
        predict_fn = lambda x: clf.predict_proba(x)
        feature_names = list(range(X_train_proba_feat.shape[1]))
        explainer = AnchorTabular(predict_fn, feature_names)
        idx = 0
        explainer.fit(X_train_proba_feat, disc_perc=(25,))
        print('Prediction: ',
              explainer.predictor(X_eval_proba_feat[idx].reshape(1, -1))[0])

        #print('Prediction: ', explainer.predict_fn(X_eval_proba_feat[idx].reshape(1, -1))[0])
        explanation = explainer.explain(X_eval_proba_feat[idx], threshold=0.8)
        print('Anchor: %s' % (' AND '.join(explanation.anchor)))
        print('Precision: %.2f' % explanation.precision)
        print('Coverage: %.2f' % explanation.coverage)

        # The Gohr-style retraining above is left disabled, so return the fitted
        # random forest and its anchor explainer.
        return clf, explainer
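
For symmetry with Example #1, a hedged persistence sketch for the forest-based explainer above; the helper name save_anchor_explainer and the save_dir argument are illustrative, not from the original.

def save_anchor_explainer(explainer: AnchorTabular, save_dir: str) -> None:
    # Same dill pattern as Example #1: detach the prediction functions before
    # dumping, and reattach after loading, e.g. with explainer.reset_predictor(...).
    import dill
    with open(f"{save_dir}/anchor_explainer.dill", "wb") as f:
        explainer.predictor = None
        explainer.samplers[0].predictor = None
        dill.dump(explainer, f)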
Example #3
st.write("""### Metrics:""")
st.write('Train accuracy:', accuracy_score(y_train,clf.predict(X_train)))
st.write('Test accuracy:', accuracy_score(y_test, clf.predict(X_test)))
st.write('Confusion matrix:')
plot_confusion_matrix(clf, X_test, y_test)
st.pyplot()
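# If running on scikit-learn >= 1.2, where plot_confusion_matrix was removed, the
# equivalent call would be (a sketch, assuming matplotlib is installed):
# from sklearn.metrics import ConfusionMatrixDisplay
# ConfusionMatrixDisplay.from_estimator(clf, X_test, y_test)
# st.pyplot()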
# st.write(classification_report(y_test, y_pred))
predict_fn = lambda x: clf.predict_proba(x)
explainer = AnchorTabular(predict_fn, feature_names)
explainer.fit(X_train)
idx = st.sidebar.slider(label='Select an instance:',min_value=1,max_value=len(y_test))
st.write("""### Selected instance:""")
st.write(X_test_df.iloc[[idx-1]], height=150)
print(y_train_df.iloc[[idx-1]])
st.write('Prediction: ', class_names[explainer.predictor(X_test[idx-1].reshape(1, -1))[0]])
st.write("""### Prediction Explained:""")
with st.spinner('Calculating'):
    explanation = explainer.explain(X_test[idx-1], threshold=0.70)
    st.write('Anchor (instance explanation): %s' % (' AND '.join(explanation.anchor)))
    st.write('Precision: %.2f' % explanation.precision)
    st.write('Coverage: %.2f' % explanation.coverage)
# st.write("""### Trust score:""")
    ts = TrustScore(k_filter=10,
                alpha=.05,
                filter_type='distance_knn',
                leaf_size=40,
                metric='euclidean',
                dist_filter_type='point')
    ts.fit(X_train, y_train, classes=len(class_names))
    score, closest_class = ts.score(X_test[idx-1].reshape(1,-1),