def LIME_graphSHAP(X, y, feature_names, ylabels, clf, xs_toexplain, labels_toexplain, ax=None, subplots=False, plotlime=True):
    """Overlay LIME and SHAP local explanations on the 2-D feature space.

    For each instance in ``xs_toexplain``: draw the classifier's decision
    contour and the training set, then the LIME local regression line
    (optional) and the linear boundary implied by the SHAP effects.

    Parameters
    ----------
    X : array-like of shape (n_samples, 2)
        Training data; also the background/reference data for both explainers.
    y : array-like
        Training labels (used only to colour the training-set scatter).
    feature_names : list of str
        Feature names forwarded to the LIME explainer.
    ylabels : list of str
        Class names forwarded to the LIME explainer.
    clf : estimator
        Fitted classifier exposing ``predict_proba``.
    xs_toexplain : sequence of array-like
        Instances whose predictions are explained.
    labels_toexplain : sequence of int
        For each instance, the class label to explain with LIME.
    ax : matplotlib Axes, optional
        Axes to draw on; created when omitted (one per instance if
        ``subplots`` is True).
    subplots : bool, default False
        Draw each explained instance on its own subplot.
    plotlime : bool, default True
        Also draw the LIME local regression line.
    """
    ## Plot explanations on feature space
    if ax is None:
        if subplots:
            if len(xs_toexplain) >= 5:
                nrows = int(len(xs_toexplain) / 5)
                # Ceiling division so nrows*ncols >= len(xs_toexplain);
                # plain truncation crashed with an IndexError when the
                # count was not an exact multiple of the row count.
                ncols = -(-len(xs_toexplain) // nrows)
                fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 3 * nrows))
                axs = axs.flatten()
            else:
                fig, axs = plt.subplots(nrows=1, ncols=len(xs_toexplain), figsize=(15, 3))
        else:
            fig, ax = plt.subplots()

    # Both explainers depend only on loop-invariant arguments, so build
    # them once instead of once per explained instance.
    explainer = lime_assessment.lime_tabular.LimeTabularExplainer(X, feature_names=feature_names, class_names=ylabels, discretize_continuous=False, kernel_width=None)
    shap_explainer = KernelExplainer(clf.predict_proba, X, nsamples=10000)

    # Plot LIME result - loop if several
    for i, x_toexplain in enumerate(xs_toexplain):

        if subplots:
            plt.sca(axs[i])
            ax = axs[i]

        if i == 0 or subplots:
            # Plot contour of black-box predictions
            plot_classification_contour(X, clf, ax)
            # Plot training set
            plot_training_set(X, y, ax)

            # Remember axis limits so the regression lines (which extend
            # to x = +/-10) do not blow up the view.
            ylim_bak = ax.get_ylim()
            xlim_bak = ax.get_xlim()
            #color_palette = sns.color_palette("bright", n_colors=len(xs_toexplain))
            color_palette = ['lime' for _ in range(len(xs_toexplain))]

        ## LIME - Generate explanations
        exp = explainer.explain_instance(x_toexplain, clf.predict_proba, num_features=2, top_labels=len(ylabels), labels=range(len(ylabels)))

        ## SHAP - explain the same instance
        e = shap_explainer.explain(np.reshape(x_toexplain, (1, X.shape[1])))

        # Plot LIME regression
        if plotlime:
            plot_lime_regression(X, exp, x_toexplain, labels_toexplain[i], ax, color_palette[i], exp.points_to_plot)

        # Line where the SHAP local linear model predicts probability 0.5:
        # 0.5 = base_value + effect_0 * x + effect_1 * y, solved for y.
        x_ridge = [-10, 10]
        row = 0
        y_shap = [(0.5 - e.effects[0, row] * x - e.base_value[row]) / e.effects[1, row] for x in x_ridge]

        # Plot SHAP-derived linear boundary
        plt.sca(ax)
        plt.plot(x_ridge, y_shap, color='red', linestyle=':', linewidth=4, label="other shap regression")

        # Mark the explained instance and restore the original view.
        plt.scatter(x_toexplain[0], x_toexplain[1], color='lime', marker='8', linewidth=4)
        plt.ylim(ylim_bak)
        plt.xlim(xlim_bak)
# Example #2
# Score: 0
def test_front_page_model_agnostic():
    """Smoke-test the front-page model-agnostic SHAP example (KNN on iris)."""
    from shap import KernelExplainer, DenseData, visualize, initjs
    from sklearn import datasets, neighbors
    from numpy import random, arange

    # Emit the JS visualization bootstrap code for notebook rendering.
    initjs()

    # Deterministically shuffle the iris indices and fit a KNN classifier
    # on the one-vs-rest target "is class 0".
    dataset = datasets.load_iris()
    random.seed(2)
    shuffled = arange(len(dataset.target))
    random.shuffle(shuffled)
    classifier = neighbors.KNeighborsClassifier()
    classifier.fit(dataset.data, dataset.target == 0)

    # Explain a single prediction against a 100-sample background set.
    reference = DenseData(dataset.data[shuffled[:100], :], dataset.feature_names)  # name the features
    shap_explainer = KernelExplainer(classifier.predict, reference, nsamples=100)
    instance = dataset.data[shuffled[102:103], :]
    visualize(shap_explainer.explain(instance))