# Run LIME on the indicator model, collect empirical coefficients, compare
# against the theoretical values, and save a whisker-box plot.
# NOTE(review): whitespace was lost in this chunk. The statements down to
# format_coefs() reference i_exp and look like the body of an experiment loop
# (presumably ``for i_exp in range(n_exp):``) whose header lies before this
# view -- restore the loop indentation when merging. TODO confirm.
s_exp = "Experiment {} / {} is running...".format(i_exp + 1, n_exp)
print(s_exp)

# getting the explanation
exp = explainer.explain_instance(xi, my_model, num_samples=n_samples)

# getting the coefficients of the local model
beta_emp_store[i_exp, :] = format_coefs(exp)

# computing the theoretical values
beta_theo = compute_beta_indicator(xi, rect, nu, my_stats)

###############################################################################
# plotting the results
fig, ax = plt.subplots(figsize=(15, 10))
plot_whisker_boxes(beta_emp_store, ax, theo=beta_theo)

# saving the fig
s_name = "results/indicator_explanation"
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
# Run LIME on a linear model (cancellation experiment), compare the empirical
# surrogate coefficients to the closed-form theory, and save the boxplot.
# NOTE(review): whitespace was lost in this chunk. The statements down to
# format_coefs() reference i_exp and look like the body of an experiment loop
# whose ``for i_exp in range(n_exp):`` header lies before this view --
# restore the loop indentation when merging. TODO confirm.
s_exp = "Experiment {} / {} is running...".format(i_exp + 1, n_exp)
print(s_exp)

# getting the explanation
exp = explainer.explain_instance(xi, my_model, num_samples=n_samples)

# getting the coefficients of the surrogate model
beta_emp_store[i_exp, :] = format_coefs(exp)

# getting the values given by theory
# default bandwidth used by LIME: sqrt(0.75 * dim)
default_nu = np.sqrt(0.75 * dim)
beta_theo = compute_beta_linear(xi, f, default_nu, my_stats)

###############################################################################
fig, ax = plt.subplots(figsize=(15, 10))
plot_whisker_boxes(
    beta_emp_store,
    ax,
    theo=beta_theo,
    title="Coefficients of surrogate model, $p={}$".format(p))
s_name = "results/cancellation_{}_boxes".format(p)
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
# Scatter/annotate each example point on the 2-D tree-regressor figure, save
# it, then plot empirical-vs-theory explanations for every example.
# NOTE(review): whitespace was lost in this chunk. The first five statements
# reference i_xi and look like the body of a loop over the examples
# (presumably ``for i_xi in range(n_xi):``) whose header lies before this
# view -- restore the loop indentation when merging. TODO confirm.
xi = xi_list[i_xi]
ax.scatter(xi[0], xi[1], color='k')
s_xi = r"$\xi_" + str(i_xi + 1) + "$"
# offset the label slightly so it does not overlap the marker
ax.annotate(s_xi, (xi[0] + 0.2, xi[1] + 0.2), fontsize=20)
ax.plot()

s_title = "2-D tree regressor"
ax.set_title(s_title, fontsize=40)
ax.tick_params(labelsize=30)
ax.set_xlabel(r"$x_1$", fontsize=30)
ax.set_ylabel(r"$x_2$", fontsize=30)
s_name = "results/general_situation"
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)

# plot empirical vs theory for all the xis
for i_xi in range(n_xi):
    fig, ax = plt.subplots(figsize=(5, 10))
    s_title = r"Explanation for $\xi_" + str(i_xi + 1) + "$"
    plot_whisker_boxes(beta_emp_store[:, :, i_xi],
                       ax,
                       theo=beta_theo[:, i_xi],
                       title=s_title,
                       ylims=[-0.06, 0.15])
    s_name = "results/explanation_xi_{}".format(i_xi + 1)
    fig.savefig(s_name + '.pdf',
                format='pdf',
                bbox_inches='tight',
                pad_inches=0)
# Run LIME on a CART tree regressor, compute the theoretical coefficients,
# and save the empirical-vs-theory boxplot.
# NOTE(review): whitespace was lost in this chunk. The statements down to
# format_coefs() reference i_exp and look like the body of an experiment loop
# whose ``for i_exp in range(n_exp):`` header lies before this view --
# restore the loop indentation when merging. TODO confirm.
# print progress every 10 experiments
if (i_exp + 1) % 10 == 0:
    s_exp = "Experiment {} / {} is running...".format(i_exp + 1, n_exp)
    print(s_exp)
exp = explainer.explain_instance(xi,
                                 tree_regressor.predict,
                                 num_samples=n_samples)
beta_emp_store[i_exp, :] = format_coefs(exp)

# get the theory
# this can take a while if dim or depth are not small
print("Computing the theory...")
beta_theo = compute_beta_tree(xi, nu, tree_regressor, my_stats)

###############################################################################
# plot empirical vs theory
fig, ax = plt.subplots(figsize=(15, 10))
plot_whisker_boxes(
    beta_emp_store,
    ax,
    theo=beta_theo,
    title=r"Coefficients of the surrogate model for a CART tree")
s_name = "results/tree_explanation"
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
# Two-panel figure for the shape-detector experiment: left panel shows the
# digit with its superpixel segmentation, right panel shows the empirical
# interpretable coefficients with the theory overlaid; then save to fig_dir.
# NOTE(review): whitespace was lost in this chunk; formatting reconstructed.
matplotlib.rc('ytick', labelsize=15)

# plot the examples and superpixels
fig, axis = plt.subplots(1, 2, figsize=(10, 5))
digit = mnist.target[samp]
title_str = "Digit: {}".format(digit)

# left panel: digit + segmentation
plot_image_segmentation(axis[0],
                        xi,
                        segments=segments,
                        indices=indices,
                        title=title_str,
                        method="nearest",
                        out_size=299)

# right panel: empirical results + theory on top
plot_whisker_boxes(betahat_store,
                   axis[1],
                   title="Interpretable coefficients",
                   xlabel="superpixels",
                   theo=exp_theo,
                   rotate=False,
                   ylims=[-0.1, 1.1],
                   feature_names=np.linspace(1, d, d, dtype=int),
                   color="red")

# save figure
s_name = fig_dir + "shape_detector_" + str(digit) + "_" + str(case)
plt.savefig(s_name + '.pdf',
            format='pdf',
            bbox_inches='tight',
            pad_inches=0,
            dpi=100)
# Run LIME with a custom ridge regressor (regularization experiment), compare
# with the linear-model theory, and save the boxplot for this lambda.
# NOTE(review): whitespace was lost in this chunk. The statements down to
# format_coefs() reference i_exp and look like the body of an experiment loop
# whose ``for i_exp in range(n_exp):`` header lies before this view --
# restore the loop indentation when merging. TODO confirm.
print(s_exp)

# getting the explanation at xi
exp = explainer.explain_instance(xi,
                                 my_model,
                                 model_regressor=ridge_regressor,
                                 num_samples=n_samples)
beta_emp_store[i_exp, :] = format_coefs(exp)

# getting the theoretical values
beta_theo = compute_beta_linear(xi, f, nu, my_stats)

###############################################################################
# plotting the result
# raw string: "\l" is an invalid escape sequence in a normal string literal
fig, ax = plt.subplots(figsize=(15, 10))
plot_whisker_boxes(
    beta_emp_store,
    ax,
    title=r"Coefficients of the surrogate model ($\lambda={}$)".format(lbda),
    theo=beta_theo,
    ylims=ylims)

# and save it
s_name = "results/regularization_default_linear_lbda_{}".format(lbda)
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
# Run LIME on the wine dataset model and plot the empirical coefficients with
# human-readable feature names.
# NOTE(review): whitespace was lost in this chunk. The statements down to
# format_coefs() reference i_exp and look like the body of an experiment loop
# whose ``for i_exp in range(n_exp):`` header lies before this view --
# restore the loop indentation when merging. TODO confirm.
print(s_exp)

# getting the explanation
exp = explainer.explain_instance(xi, my_model, num_samples=n_samples)

# getting the coefficients of the local model
beta_emp_store[i_exp, :] = format_coefs(exp)

###############################################################################
# getting nicer feature names
# (shortened versions of wine['feature_names'])
my_features = [
    'alcohol', 'Malic acid', 'ash', 'alcalinity', 'Mg', 'phenols',
    'flavanoids', 'non-flavanoid phenol', 'proanthocyanins',
    'color intensity', 'hue', 'part of diluted wines', 'proline'
]

# plotting the result
fig, ax = plt.subplots(figsize=(15, 10))
plot_whisker_boxes(beta_emp_store,
                   ax,
                   rotate=True,
                   feature_names=my_features)
s_name = "results/ignore_non_linear_default_weights"
fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
num_samples=n_samples) # getting the coefficients of the local models beta_emp_store_1[i_exp, :] = format_coefs(exp_1) beta_emp_store_2[i_exp, :] = format_coefs(exp_2) beta_emp_store_sum[i_exp, :] = format_coefs(exp_sum) beta_mean_1 = np.mean(beta_emp_store_1, 0) beta_mean_2 = np.mean(beta_emp_store_2, 0) beta_mean_sum = np.mean(beta_emp_store_sum, 0) ########################################################################### # plotting the interpretable coefficients for the three models fig_1, ax_1 = plt.subplots(figsize=(15, 10)) plot_whisker_boxes(beta_emp_store_1, ax_1, theo=beta_mean_1, color="k") fig_2, ax_2 = plt.subplots(figsize=(15, 10)) plot_whisker_boxes(beta_emp_store_2, ax_2, theo=beta_mean_2, color="k") fig_sum, ax_sum = plt.subplots(figsize=(15, 10)) plot_whisker_boxes(beta_emp_store_sum, ax_sum, theo=beta_mean_sum, color="red") # saving the fig fig_1.savefig("results/linearity_of_explanations_1.pdf", format='pdf', bbox_inches='tight', pad_inches=0)