kernel_width=nu)

    # one row of interpretable coefficients per repetition of the experiment
    beta_emp_store = np.zeros((n_exp, dim + 1))
    for run in range(n_exp):

        # lightweight progress report, printed every tenth repetition
        if (run + 1) % 10 == 0:
            print("Experiment {} / {} is running...".format(run + 1, n_exp))

        # query the explainer on xi for the current repetition
        current_exp = explainer.explain_instance(xi,
                                                 my_model,
                                                 num_samples=n_samples)

        # keep the local surrogate model's coefficients for this repetition
        beta_emp_store[run, :] = format_coefs(current_exp)

    # theoretical counterpart of the empirical coefficients
    beta_theo = compute_beta_indicator(xi, rect, nu, my_stats)

    ###########################################################################

    # whisker-box plot of the empirical coefficients against the theory
    fig, ax = plt.subplots(figsize=(15, 10))
    plot_whisker_boxes(beta_emp_store, ax, theo=beta_theo)

    # save the figure to disk as a PDF
    s_name = "results/indicator_explanation"
    fig.savefig(s_name + '.pdf', format='pdf', bbox_inches='tight', pad_inches=0)
# Beispiel #2 (example separator from the original source listing)
# 0
# replacement color for hidden superpixels;
# hide_color = None gives mean replacement
hide_color = 0
# presumably converts the scalar hide color to an RGB triple — TODO confirm
rgb_hide_color = hide_color_helper(hide_color)
    
# entering the main loop: one LIME explanation per repetition
for i in tqdm(range(0,n_exp)):
       
    # explanation of xi by the basic shape detector
    explanation = explainer.explain_instance(xi,
                                                 classifier_fn=basic_shape_detector,
                                                 top_labels=10,
                                                 hide_color=rgb_hide_color,
                                                 num_samples=n_examples,
                                                 segmentation_fn=segmenter)
    # get the explanations for label 0
    # (betahat_store is allocated above this excerpt — TODO confirm shape)
    betahat_store[i,:] = format_coefs(explanation,0)
    
print("done!")
print()

# compute the theoretical explanations for the same hide color
exp_theo = compute_beta_basic_shape(xi,segments,indices,tau,hide_color=hide_color)

####################################################################

# this folder contains the resulting figure
fig_dir = "results/figures/"
mkdir(fig_dir)

# graphical parameters: fonttype 42 embeds TrueType fonts in the PDF output
plt.rcParams['pdf.fonttype'] = 42
            # progress message (the enclosing if/loop headers are above this
            # excerpt — presumably printed every few repetitions)
            s_exp = "Experiment {} / {} is running...".format(i_exp + 1, n_exp)
            print(s_exp)

        # getting the explanations for both models and the sum
        exp_1 = explainer.explain_instance(xi,
                                           my_model_1,
                                           num_samples=n_samples)
        exp_2 = explainer.explain_instance(xi,
                                           my_model_2,
                                           num_samples=n_samples)
        exp_sum = explainer.explain_instance(xi,
                                             model_sum,
                                             num_samples=n_samples)

        # getting the coefficients of the local models
        beta_emp_store_1[i_exp, :] = format_coefs(exp_1)
        beta_emp_store_2[i_exp, :] = format_coefs(exp_2)
        beta_emp_store_sum[i_exp, :] = format_coefs(exp_sum)

        # column-wise means over the repetitions so far.
        # NOTE(review): these are recomputed on every loop iteration although
        # only the final values are used below — they could be hoisted after
        # the loop (loop header not visible in this excerpt, so left as-is).
        beta_mean_1 = np.mean(beta_emp_store_1, 0)
        beta_mean_2 = np.mean(beta_emp_store_2, 0)
        beta_mean_sum = np.mean(beta_emp_store_sum, 0)

    ###########################################################################

    # plotting the interpretable coefficients for the three models
    fig_1, ax_1 = plt.subplots(figsize=(15, 10))
    plot_whisker_boxes(beta_emp_store_1, ax_1, theo=beta_mean_1, color="k")

    fig_2, ax_2 = plt.subplots(figsize=(15, 10))
    plot_whisker_boxes(beta_emp_store_2, ax_2, theo=beta_mean_2, color="k")
    # model prediction for the (presumably normalized) image xi_norm
    # — TODO confirm xi_norm preprocessing matches model expectations
    predictions = model.predict(tf.expand_dims(xi_norm, 0))
    predicted_class = np.argmax(predictions)

    # get the segments (superpixels); d = number of distinct segments
    segments = segmenter(xi_norm)
    d = np.unique(segments).shape[0]

    # get the explanations: one row of d+1 coefficients per repetition
    beta_store = np.zeros((n_exp, d + 1))
    for i_exp in tqdm(range(n_exp)):
        print("run {} / {}".format(i_exp + 1, n_exp))
        explanation = explainer.explain_instance(xi_norm,
                                                 classifier_fn=model.predict,
                                                 num_samples=n_examples,
                                                 segmentation_fn=segmenter)
        # keep the coefficients for the predicted class only
        beta_store[i_exp, :] = format_coefs(explanation, predicted_class)

    # store everything for later analysis
    empirical = {}
    empirical["predictions"] = predictions
    empirical["segments"] = segments
    empirical["predicted_class"] = predicted_class
    empirical["confidence"] = predictions[0, predicted_class]
    empirical["explanations"] = beta_store
    empirical["image_name"] = id_image + 1

    # result_path and id_image come from the enclosing scope (above this
    # excerpt); file name is the 1-based image index, zero-padded to 8 digits
    pickle_name = result_path + str(id_image + 1).zfill(8) + '.pkl'

    print("saving results...")
    with open(pickle_name, 'wb') as f:
        pickle.dump(empirical, f)
# replacement color for hidden superpixels;
# hide_color = None gives mean replacement
hide_color = 0
# presumably converts the scalar hide color to an RGB triple — TODO confirm
rgb_hide_color = hide_color_helper(hide_color)

# entering the main loop: one LIME explanation per repetition
for i in tqdm(range(0, n_exp)):

    # explanation of the RGB image under the linear model
    explanation = explainer.explain_instance(xi_rgb,
                                             classifier_fn=linear_function,
                                             top_labels=10,
                                             hide_color=rgb_hide_color,
                                             num_samples=n_examples,
                                             segmentation_fn=segmenter)

    # keep the coefficients for label 0
    # (data_store is allocated above this excerpt — TODO confirm shape)
    data_store[i, :] = format_coefs(explanation, 0)

print("done!")
print()

# computing theory: closed-form coefficients for the linear model
exp_theo = compute_beta_linear(xi_vec, segments, coefs, hide_color=hide_color)

####################################################################

# this folder contains the resulting figure
fig_dir = "results/figures/"
mkdir(fig_dir)

# plot the examples and superpixels (plotting continues below this excerpt)
fig, axis = plt.subplots(1, 3, figsize=(16, 5))