# Example 1
def demo_attribute(img_nos: list = None, att: Attributer = None):
    """Show a demo attribution panel for each requested ImageNet example.

    Top row: original image, annotated image, reshaped input, and the
    evaluation annotation mask. Bottom row: one attribution map per method,
    overlaid on the greyscale original.
    """
    if att is None:
        att = Attributer(model_name=VGG)
    if img_nos is None:
        img_nos = [11, 13, 15]
    for example_no in img_nos:
        # wrap the example so the attribution methods can consume it
        handler = ImageHandler(img_no=example_no, model_name=VGG)
        # top predicted label and its probability for the figure title
        pred_label, pred_prob = att.predict_for_model(handler)
        plt.figure(figsize=(15, 10))
        plt.suptitle(
            f'Attributions for example {example_no}, '
            f'prediction = `{pred_label}`, probability = {pred_prob:.2f}')

        # panel 1: the raw ImageNet image
        plt.subplot(2, 4, 1)
        plt.axis('off')
        plt.title(f'ImageNet Example {example_no}')
        raw_path = get_image_file_name(IMG_BASE_PATH, example_no) + '.JPEG'
        plt.imshow(plt.imread(raw_path))

        # panel 2: the bounding-box annotated image
        plt.subplot(2, 4, 2)
        plt.title(f'Annotated Example {example_no}')
        annotated_path = (get_image_file_name(ANNOTATE_BASE_PATH, example_no)
                          + '.JPEG')
        plt.imshow(plt.imread(annotated_path))

        # panel 3: the image resized to the model's input shape
        plt.subplot(2, 4, 3)
        plt.title('Reshaped Example')
        plt.imshow(demo_resizer(img_no=example_no,
                                target_size=handler.get_size()))

        # panel 4: the evaluation mask derived from the annotation
        plt.subplot(2, 4, 4)
        plt.title('Annotation Mask')
        eval_mask = get_mask_for_eval(img_no=example_no,
                                      target_size=handler.get_size())
        plt.imshow(eval_mask, cmap='seismic', clim=(-1, 1))

        attributions = att.attribute_panel(ih=handler,
                                           methods=METHODS,
                                           save=False,
                                           visualise=False,
                                           take_threshold=False,
                                           take_absolute=False,
                                           sigma_multiple=1)
        # panels 5..: one overlay per attribution method
        for idx, (method_name, attr_map) in enumerate(attributions.items()):
            plt.subplot(2, 4, 5 + idx)
            plt.title(method_name)
            plt.axis('off')
            plt.imshow(handler.get_original_img(), cmap='gray', alpha=0.75)
            plt.imshow(attr_map, cmap='seismic', clim=(-1, 1), alpha=0.8)
        plt.show()
        plt.clf()
        plt.close()
# Example 2
    def attribute(self, ih: ImageHandler):
        """Compute a guided-SHAP attribution map for a single image.

        Runs SHAP on the mapped layer's activations for the top-ranked
        output, collapses the RGB axis, resizes to the handler's input
        size, and modulates the result with guided backpropagation.

        Args:
            ih: image handler providing the expanded input and target size.

        Returns:
            A 2-D attribution map normalised to the range (-1, 1).
        """
        # get outputs for top prediction count "ranked_outputs"
        input_to_layer_n = self.map2layer(ih.get_expanded_img())
        shap_values, indexes = self.explainer.shap_values(X=input_to_layer_n,
                                                          nsamples=200,
                                                          ranked_outputs=1)

        # shap returns a bare array when only one output is ranked;
        # normalise to a list so the indexing below is uniform
        if not isinstance(shap_values, list):
            shap_values = [shap_values]

        sh = shap_values[0]
        # aggregate along the third axis (the RGB axis)
        sv = sh[0].sum(-1)
        # resize into input shape (~4x rescale for some models).
        # BUGFIX: cv2.resize's third *positional* argument is `dst`, not
        # `interpolation` -- the flag must be passed by keyword.
        sv = cv2.resize(sv, ih.get_size(), interpolation=cv2.INTER_LINEAR)
        # normalise to (-1, 1); guard the all-zero map so we never divide
        # by zero (mirrors the guard used in grad_cam)
        sv_max = np.max(np.abs(sv))
        if sv_max != 0:
            sv /= sv_max

        gb = self.guided_backprop(ih)
        # modulate guided backprop with the (broadcast) SHAP map
        guided_shap = gb * sv[..., np.newaxis]
        # sum over whichever axis holds the 3 colour channels
        guided_shap = guided_shap.sum(axis=np.argmax(
            np.asarray(guided_shap.shape) == 3))
        gs_max = np.max(np.abs(guided_shap))
        if gs_max != 0:
            guided_shap /= gs_max

        return guided_shap[0]
# Example 3
 def get_image_handler_and_mask(self, img_no):
     """Return the ImageHandler and evaluation mask for one example.

     The handler is what each attribution method consumes; the bounding-box
     annotation mask, resized to the model's input/attribution shape, is
     what the evaluation compares attributions against.
     """
     handler = ImageHandler(img_no=img_no, model_name=self.model_name)
     eval_mask = get_mask_for_eval(img_no=img_no,
                                   target_size=handler.get_size(),
                                   save=False,
                                   visualise=False)
     return handler, eval_mask
# Example 4
    def grad_cam(self, ih: ImageHandler, cls):
        """GradCAM method for visualizing input saliency.

        Args:
            ih: image handler providing the preprocessed input and its size.
            cls: index of the output class to explain.

        Returns:
            A 2-D class-activation map resized to the input size and
            scaled to [0, 1].
        """
        # gradient of the class score w.r.t. the chosen conv layer's output
        y_c = self.model.output[0, cls]
        conv_output = self.model.layers[self.layer_no].output
        grads = K.gradients(y_c, conv_output)[0]
        gradient_function = K.function([self.model.input],
                                       [conv_output, grads])

        output, grads_val = gradient_function([ih.get_processed_img()])
        output, grads_val = output[0, :], grads_val[0, :, :, :]

        # global-average-pool the gradients into per-channel weights, then
        # take the weighted sum of the feature maps
        weights = np.mean(grads_val, axis=(0, 1))
        cam = np.dot(output, weights)

        # upsample the CAM to the input size.
        # BUGFIX: cv2.resize's third *positional* argument is `dst`, not
        # `interpolation` -- the flag must be passed by keyword.
        cam = cv2.resize(cam, ih.get_size(), interpolation=cv2.INTER_LINEAR)
        # ReLU: keep only positive influence, then scale to [0, 1]
        cam = np.maximum(cam, 0)
        cam_max = cam.max()
        if cam_max != 0:
            cam = cam / cam_max
        return cam