def explain_image(self, labels, instance, column_name=None, num_features=100000,
                      num_samples=300, batch_size=200, hide_color=0):
        """Explain an image of a prediction.

        It analyze the prediction by LIME, and returns a report of which words are most impactful
        in contributing to certain labels.

        Args:
          labels: a list of labels to explain.
          instance: the prediction instance. It needs to conform to model's input. Can be a csv
              line string, or a dict.
          column_name: which image column to explain. Can be None if there is only one image column
              in the model input.
          num_features: maximum number of areas (features) to analyze. Passed to
              LIME LimeImageExplainer directly.
          num_samples: size of the neighborhood to learn the linear model. Passed to
              LIME LimeImageExplainer directly.
          batch_size: size of batches passed to predict_fn. Passed to
              LIME LimeImageExplainer directly.
          hide_color: the color used to perturb images. Passed to
              LIME LimeImageExplainer directly.

        Returns:
          A LIME's lime.explanation.Explanation.

        Raises:
          ValueError: if the given image column is not found in the model input, or if column_name
              is None but there are multiple image columns in the model input.
        """

        from lime.lime_image import LimeImageExplainer

        if len(self._image_columns) > 1 and not column_name:
            raise ValueError('There are multiple image columns in the input of the model. ' +
                             'Please specify "column_name".')
        elif column_name and column_name not in self._image_columns:
            raise ValueError('Specified column_name "%s" not found in the model input.'
                             % column_name)

        image_column_name = column_name if column_name else self._image_columns[0]
        if isinstance(instance, six.string_types):
            instance = next(csv.DictReader([instance], fieldnames=self._headers))

        predict_fn = self._make_image_predict_fn(labels, instance, image_column_name)
        explainer = LimeImageExplainer()
        with file_io.FileIO(instance[image_column_name], 'rb') as fi:
            im = Image.open(fi)
        im.thumbnail((299, 299), Image.ANTIALIAS)
        rgb_im = np.asarray(im.convert('RGB'))
        exp = explainer.explain_instance(
            rgb_im, predict_fn, labels=range(len(labels)), top_labels=None,
            hide_color=hide_color, num_features=num_features,
            num_samples=num_samples, batch_size=batch_size)
        return exp
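A minimal usage sketch for explain_image above. The explainer object, CSV line and column name below are hypothetical placeholders, not part of the original code.

# Hypothetical usage: `tabular_explainer` is assumed to be an instance of the class
# that defines explain_image(); the CSV line must match self._headers.
csv_instance = 'id1,gs://my-bucket/images/cat.jpg'

exp = tabular_explainer.explain_image(
    labels=['cat', 'dog'],
    instance=csv_instance,
    column_name='image',   # may be omitted when the model has a single image column
    num_samples=300)

# Label indices follow the order of the `labels` argument (labels=range(len(labels)) above).
temp, mask = exp.get_image_and_mask(0, positive_only=True, num_features=5, hide_rest=False)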
Example #2
from lime.lime_image import LimeImageExplainer
from skimage.segmentation import mark_boundaries


def lime(model, image):
    explainer = LimeImageExplainer()
    explanation = explainer.explain_instance(image,
                                             model.predict,
                                             hide_color=0,
                                             top_labels=5,
                                             num_samples=100)
    temp, mask = explanation.get_image_and_mask(explanation.top_labels[0],
                                                positive_only=True,
                                                num_features=5,
                                                hide_rest=True)
    features = mark_boundaries(image, mask)
    label = explanation.top_labels[0]
    print(label)
    result = CLASSES[label]

    return label, temp, mask, features, result
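A hedged usage sketch for the lime() helper above; `model` (a Keras-style classifier whose predict() accepts a batch of HWC float images) and `CLASSES` are assumed to be defined elsewhere, exactly as the helper itself assumes.

import matplotlib.pyplot as plt
import numpy as np

image = np.random.rand(224, 224, 3)    # stand-in for a preprocessed test image
label, temp, mask, features, result = lime(model, image)

plt.imshow(features)                   # super-pixel boundaries for the top label
plt.title(result)
plt.axis('off')
plt.show()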
Example #3
import numpy as np
from skimage.segmentation import slic
from lime.lime_image import LimeImageExplainer


def Lime(model, x, y, seed):
    def Predict(Input):
        return model.predict(
            np.mean(Input.reshape((-1, 48, 48, 3)),
                    axis=3).reshape(-1, 48, 48, 1)).reshape((-1, 7))

    def Segmentation(Input):
        return slic(Input)

    explainer = LimeImageExplainer()
    explanation = explainer.explain_instance(image=x,
                                             classifier_fn=Predict,
                                             segmentation_fn=Segmentation,
                                             random_seed=seed)
    image, mask = explanation.get_image_and_mask(label=y,
                                                 positive_only=False,
                                                 hide_rest=False,
                                                 num_features=5,
                                                 min_weight=0.0)
    return image
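A short usage sketch for Lime() above, assuming `model` is a Keras classifier over 48x48x1 grayscale inputs with 7 output classes (which is what the reshapes in Predict() imply); the sample image here is synthetic.

import numpy as np
import matplotlib.pyplot as plt

x = np.random.rand(48, 48, 3)    # LIME perturbs an RGB image; Predict() collapses it to grayscale

# Explain the predicted class, mirroring Predict()'s preprocessing, so the label
# is guaranteed to be covered by the explanation.
probs = model.predict(np.mean(x.reshape((-1, 48, 48, 3)), axis=3).reshape(-1, 48, 48, 1))
y = int(np.argmax(probs))

overlay = Lime(model, x, y, seed=42)
plt.imshow(overlay)
plt.axis('off')
plt.show()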
Example #4
class LimeImageExplainer(ImageExplainer):
    def __init__(self, model, label):
        self.explainer = LimeImage()
        self.model = model
        self.label = label

    def explain(self, instance, budget):
        def classifier_fn(instances):
            instances = np.moveaxis(instances, -1, 1)
            instances = torch.tensor(instances).cuda().float()
            with torch.no_grad():
                output = self.model.predict_proba(instances)
            return output.cpu().numpy()

        instance = instance.double().detach().cpu().numpy()
        instance = np.moveaxis(instance, 0, -1)

        # Heuristic: each super pixel is about 5% of total pixels
        # max ensures we return at least one superpixel
        num_features = max(budget // 5, 1)

        exp = self.explainer.explain_instance(instance, classifier_fn)
        _, mask = exp.get_image_and_mask(
            label=self.label,
            positive_only=True,
            # negative_only=False,
            hide_rest=False,
            num_features=num_features,
            min_weight=0)

        mask = mask.astype(np.float32)

        # # only return above percentile
        # top_percentile = np.percentile(mask, 100 - budget)
        # mask[ mask < top_percentile ] = 0.0
        # mask[ mask >= top_percentile ] = 1.0

        return mask
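The wrapper above shuttles images between PyTorch's channels-first (CHW) layout and the channels-last (HWC) layout that LIME expects, using np.moveaxis in both directions. A tiny self-contained check of that round trip:

import numpy as np

chw = np.zeros((3, 32, 32), dtype=np.float32)            # PyTorch-style channels-first image
hwc = np.moveaxis(chw, 0, -1)                            # layout passed to explain_instance
assert hwc.shape == (32, 32, 3)

batch_hwc = np.zeros((8, 32, 32, 3), dtype=np.float32)   # perturbed batch as LIME provides it
batch_chw = np.moveaxis(batch_hwc, -1, 1)                # back to channels-first for the model
assert batch_chw.shape == (8, 3, 32, 32)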
Example #5
from typing import Callable

import numpy as np
from lime.lime_image import LimeImageExplainer
from skimage.segmentation import mark_boundaries


class LimeExplainer:
    def __init__(self, random_seed=42):
        self.random_seed = random_seed
        self.explainer = LimeImageExplainer(random_state=self.random_seed)

    def explain(self,
                image_array: np.ndarray,
                classifier_func: Callable,
                top_labels: int = 2):
        """
        Explain a single image (e.g. 224x224x3, HWC) and return it with LIME
        super-pixel boundaries marked.

        :param image_array: the image as a NumPy array.
        :param classifier_func: callable mapping a batch of images to class probabilities.
        :param top_labels: number of top labels to explain.
        :return: the image with explanation boundaries drawn by mark_boundaries.
        """
        explanation = self.explainer.explain_instance(
            image_array,
            classifier_func,
            top_labels=top_labels,
            hide_color=0,
            random_seed=self.random_seed,
            num_samples=1000)
        img, mask = explanation.get_image_and_mask(explanation.top_labels[0],
                                                   positive_only=False,
                                                   num_features=10,
                                                   hide_rest=False)

        probabilities = classifier_func([image_array])

        print(f"Top labels:")
        for idx, (class_idx, class_probability) in enumerate(
                zip(explanation.top_labels, probabilities), 1):
            print(
                f"\t{class_idx})  '{LABEL_MAPPING[class_idx]}: {class_probability.max()}'"
            )

        img_boundary = mark_boundaries(img / 255.0, mask)
        return img_boundary
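LimeExplainer.explain() above needs a classifier_func that maps a batch of HWC images to class probabilities. A minimal sketch of such a wrapper around a pretrained torchvision ResNet-18, assuming 224x224 uint8 RGB inputs and the usual ImageNet normalization; the model choice, preprocessing constants and LABEL_MAPPING are assumptions, not part of the original.

import numpy as np
import torch
import torchvision.models as models
import torchvision.transforms as T

model = models.resnet18(pretrained=True).eval()
preprocess = T.Compose([
    T.ToTensor(),                                # HWC uint8 -> CHW float in [0, 1]
    T.Normalize(mean=[0.485, 0.456, 0.406],      # standard ImageNet statistics
                std=[0.229, 0.224, 0.225]),
])


def classifier_func(images):
    # LIME passes a batch of HWC images; return an (N, num_classes) probability array.
    batch = torch.stack([preprocess(np.asarray(img, dtype=np.uint8)) for img in images])
    with torch.no_grad():
        probs = torch.softmax(model(batch), dim=1)
    return probs.numpy()

# Usage (LABEL_MAPPING must map class indices to readable names for the print-out above):
# boundary = LimeExplainer().explain(image_uint8, classifier_func)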
Example #7
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model

x_pred = face_image(image, dog, 128, 128)
print(x_pred.shape)

# load model
model = load_model('./project/project02/model_save/best_xception.hdf5')

try:
    #--------------------------------------------------------------
    from lime.lime_image import LimeImageExplainer
    top_labels = 3

    explainer = LimeImageExplainer()
    explanation = explainer.explain_instance(x_pred[0],
                                             model.predict,
                                             hide_color=0,
                                             top_labels=top_labels,
                                             num_samples=1000)

    from skimage.segmentation import mark_boundaries
    temp, mask = explanation.get_image_and_mask(explanation.top_labels[0],
                                                positive_only=True,
                                                hide_rest=True)
    plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
    #--------------------------------------------------------------

    # predict
    prediction = model.predict(x_pred)
    number = np.argmax(prediction, axis=1)

    # load the categories
Example #8
import os

import matplotlib.pyplot as plt
import mpld3
from lime.lime_image import LimeImageExplainer
from skimage.segmentation import mark_boundaries

# transform_img_fn, transform_custom_image and decode_predictions are assumed to be
# provided elsewhere in the project (decode_predictions typically comes from the
# Keras application that defines the model).


class NNModel(object):
    """Wraps an image classifier and renders LIME explanations as mpld3 HTML."""
    def __init__(self,
                 model,
                 logger,
                 datapath="/home/chris/data/ML/images/oxfordiiipets"):
        super(NNModel, self).__init__()
        self.datapath = datapath
        self.model = model
        self.logger = logger

    def load_raw_data(self):
        self.image_files = os.listdir(os.path.join(self.datapath, "images"))
        # self.trimap_files = os.listdir(os.path.join(self.datapath,
        #                                             "annotations", "trimaps"))
        # self.xml_files = os.listdir(os.path.join(self.datapath,
        #                                          "annotations", "xmls"))

    def preprocess(self):
        self.load_raw_data()
        self.images, valid_files = transform_img_fn([
            os.path.join(self.datapath, "images", i)
            for i in self.image_files[:100]
        ])
        self.valid = [self.image_files[i] for i in valid_files]

    def create_model_explainer(self):
        self.preds = self.model.predict(self.images)
        self.decoded_preds = decode_predictions(self.preds, )
        self.explainer = LimeImageExplainer()

    def get_explanation(self, i):
        title_text = self.valid[i].split("_")[:-1]
        title_text = " ".join(title_text)
        num_labels = 3
        preds = self.decoded_preds[i]
        explanation = self.explainer.explain_instance(self.images[i],
                                                      self.model.predict,
                                                      top_labels=num_labels,
                                                      hide_color=0,
                                                      num_samples=200)
        fig, subplots = plt.subplots(1, num_labels)
        fig.set_figheight(3)
        fig.set_figwidth(9)
        j = 0
        for exp, ax in zip(explanation.local_exp.keys(), subplots):
            temp, mask = explanation.get_image_and_mask(exp,
                                                        positive_only=False,
                                                        num_features=5,
                                                        hide_rest=False)
            ax.imshow(mark_boundaries(temp / 2 + 0.5, mask))
            ax.set_title(preds[j][1] + ", %.3f" % preds[j][2])
            j += 1
        plt.tight_layout()
        return mpld3.fig_to_html(fig), title_text

    def get_custom_explanation(self, filename):
        img = transform_custom_image(os.path.join('tmp', filename))
        num_labels = 3
        preds = self.model.predict(img)
        img = img.reshape(299, 299, 3)
        decoded_preds = decode_predictions(preds, )
        explanation = self.explainer.explain_instance(img,
                                                      self.model.predict,
                                                      top_labels=num_labels,
                                                      hide_color=0,
                                                      num_samples=200)
        fig, subplots = plt.subplots(1, num_labels)
        fig.set_figheight(3)
        fig.set_figwidth(9)
        j = 0
        for exp, ax in zip(explanation.local_exp.keys(), subplots):
            temp, mask = explanation.get_image_and_mask(exp,
                                                        positive_only=False,
                                                        num_features=5,
                                                        hide_rest=False)
            ax.imshow(mark_boundaries(temp / 2 + 0.5, mask))
            ax.set_title(decoded_preds[0][j][1] +
                         ", %.3f" % decoded_preds[0][j][2])
            j += 1
        plt.tight_layout()
        return mpld3.fig_to_html(fig), filename
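A hypothetical driver for NNModel. InceptionV3 is only a guess based on the 299x299 inputs and decode_predictions used above; the logger is illustrative, and the project-specific helpers (transform_img_fn, etc.) must be importable for this to run.

import logging

from keras.applications.inception_v3 import InceptionV3

logger = logging.getLogger('lime-demo')
nn = NNModel(model=InceptionV3(weights='imagenet'), logger=logger)

nn.preprocess()                       # loads and transforms the first 100 images
nn.create_model_explainer()           # predicts and builds the LimeImageExplainer
html, title = nn.get_explanation(0)   # mpld3 HTML for the first valid image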
Example #9
class XDeepLimeImageExplainer(Explainer):
    def __init__(self, predict_proba, class_names):
        """Init function.

        # Arguments
            predict_proba: Function. Classifier prediction probability function.
            class_names: List. A list of class names, ordered according to whatever the classifier is using.
        """
        Explainer.__init__(self, predict_proba, class_names)
        # Initialize explainer
        self.set_parameters()

    def set_parameters(self, **kwargs):
        """Parameter setter for lime_image.

        # Arguments
            **kwargs: Parameters for LimeImageExplainer. For more detail, please check https://lime-ml.readthedocs.io/en/latest/index.html.
        """
        self.explainer = LimeImageExplainer(**kwargs)

    def explain(self, instance, top_labels=None, labels=(1, ), **kwargs):
        """Generate an explanation for a prediction with LIME.

        # Arguments
            instance: Array. An image to be explained.
            top_labels: Integer. Number of labels you care about.
            labels: Tuple. Labels you care about; if top_labels is not None, it will be replaced by the predicted top labels.
            **kwargs: Parameters for explain_instance. For more detail, please check https://lime-ml.readthedocs.io/en/latest/index.html.
        """
        Explainer.explain(self, instance, top_labels=top_labels, labels=labels)
        self.explanation = self.explainer.explain_instance(
            instance,
            self.predict_proba,
            top_labels=top_labels,
            labels=self.labels,
            **kwargs)

    def show_explanation(self,
                         deprocess=None,
                         positive_only=True,
                         num_features=5,
                         hide_rest=False):
        """Visualization of explanation of lime_image.

        # Arguments
            deprocess: Function. A function to deprocess the image.
            positive_only: Boolean. Whether only show feature with positive weight.
            num_features: Integer. Numbers of feature you care about.
            hide_rest: Boolean. Whether to hide rest of the image.
        """
        Explainer.show_explanation(self)
        exp = self.explanation
        labels = self.labels

        print()
        print("LIME Explanation")
        print()

        fig, axs = plt.subplots(nrows=1,
                                ncols=len(labels),
                                figsize=(len(labels) * 3, 3))

        assert hasattr(labels, '__len__')
        for idx in range(len(labels)):
            # Inverse
            label = labels[-idx - 1]
            result = exp.intercept[label]
            local_exp = exp.local_exp[label]
            for item in local_exp:
                result += item[1]
            print("Explanation for label {}:".format(self.class_names[label]))
            print("Local Prediction:     {:.3f}".format(result))
            print("Original Prediction:  {:.3f}".format(
                self.original_pred[label]))
            print()
            temp, mask = exp.get_image_and_mask(label,
                                                positive_only=positive_only,
                                                num_features=num_features,
                                                hide_rest=hide_rest)
            if deprocess is not None:
                temp = deprocess(temp)
            if len(labels) == 1:
                axs.imshow(mark_boundaries(temp, mask), alpha=0.9)
                axs.axis('off')
                axs.set_title("{}".format(self.class_names[label]))
            else:
                axs[idx].imshow(mark_boundaries(temp, mask), alpha=0.9)
                axs[idx].axis('off')
                axs[idx].set_title("{}".format(self.class_names[label]))
        plt.show()
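A sketch of driving XDeepLimeImageExplainer, assuming `predict_proba` (a function mapping a batch of HWC images to class probabilities) and an HWC NumPy image `img` already exist; the class names are illustrative.

class_names = ['cat', 'dog']

xdeep_explainer = XDeepLimeImageExplainer(predict_proba, class_names)
xdeep_explainer.explain(img, top_labels=2)          # stores a lime Explanation internally
xdeep_explainer.show_explanation(num_features=5)    # prints local/original predictions and plots them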
Example #10
def predict(request):
    """ Predict - Show Image(with lime) and Probabilities """
    import numpy as np
    import matplotlib.pyplot as plt

    from skimage.segmentation import mark_boundaries
    from keras.preprocessing import image
    from keras.models import load_model
    from lime.lime_image import LimeImageExplainer
    from lime.wrappers.scikit_image import SegmentationAlgorithm

    if request.method == 'POST' and request.FILES['test']:
        if not os.path.exists(os.path.join(STATIC_URL, 'img/test/')):
            os.mkdir(os.path.join(STATIC_URL, 'img/test/'))

        test = request.FILES['test']
        with open(os.path.join(STATIC_URL, 'img/test/', 'test.jpg'),
                  'wb+') as destination:
            for chunk in test.chunks():
                destination.write(chunk)

        img = image.load_img(os.path.join(STATIC_URL, 'img/test/', 'test.jpg'),
                             target_size=(128, 128))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        o_img = img / 255

        t_img = o_img[0]  #for lime (4D -> 3D)
        t_img = t_img.astype('double')

        model = cnn_model()
        model.load_weights('./model/cnn_model.h5')
        guess = np.argmax(model.predict(o_img), axis=-1)
        out = 'dog' if guess == 1 else 'cat'

        lime_explainer = LimeImageExplainer()
        segmenter = SegmentationAlgorithm('slic',
                                          n_segments=100,
                                          compactness=1,
                                          sigma=1)
        explanation = lime_explainer.explain_instance(
            t_img, model.predict, segmentation_fn=segmenter)
        temp, mask = explanation.get_image_and_mask(
            model.predict(o_img).argmax(axis=1)[0],
            positive_only=True,
            hide_rest=False)

        fig = plt.figure()
        plt.imshow(mark_boundaries(temp, mask))
        plt.axis('off')
        plt.savefig(os.path.join(STATIC_URL, 'img/test/', 'lime.jpg'), )
        plt.close(fig)

        context = {
            'content': out,
            'prob_cat': model.predict(o_img)[0][0],
            'prob_dog': model.predict(o_img)[0][1],
        }

        return render(request, 'predict/predict.html', context)

    return render(request, 'predict/predict.html', {'content': 'wrong access'})
Example #11
import random

from lime.lime_image import LimeImageExplainer


class LimeCounterfactualImage(object):
    def __init__(self):
        """
        Initialise the LimeImageExplainer

        Args:
            None
        """
        self.lime_explainer = LimeImageExplainer()

    def explain_instance(self, image, predict_fn, step_size=1000):
        """
        Return a grayscale image showing the counterfactual for a given input image.

        Args:
            image (np.ndarray): Input image for which a counterfactual is to be produced.
            predict_fn (Callable): A function which returns the model's predictions.
            step_size (int): Number of random pixels to change every time we
                             try to make a new counterfactual candidate.
        """
        explanation = self.lime_explainer.explain_instance(
            image.astype("uint8"),
            predict_fn,
            top_labels=2,
            hide_color=0,
            num_samples=10,
        )
        temp, mask = explanation.get_image_and_mask(
            explanation.top_labels[0],
            positive_only=False,
            num_features=10,
            hide_rest=False,
        )
        real_pred = explanation.top_labels[0]
        new_img = image.copy()
        all_positives_removed, out_idx, in_idx = set(), 0, 0
        while real_pred == explanation.top_labels[0]:
            positives = list()
            for x, mrow in enumerate(mask):
                for y, mcol in enumerate(mrow):
                    if mcol == 1:
                        positives.append((x, y))
            new_mask = mask.copy()
            new_img = new_img.copy()
            to_change = random.sample(positives, min(step_size, len(positives)))
            for p in to_change:
                (
                    new_img[p[0]][p[1]][0],
                    new_img[p[0]][p[1]][1],
                    new_img[p[0]][p[1]][2],
                ) = (0, 0, 0)
                all_positives_removed.add(p)
            explanation = self.lime_explainer.explain_instance(
                new_img.astype("uint8"),
                predict_fn,
                top_labels=2,
                hide_color=0,
                num_samples=10,
            )
            temp, mask = explanation.get_image_and_mask(
                explanation.top_labels[0],
                positive_only=False,
                num_features=10,
                hide_rest=False,
            )
        grayscale_img = image.copy().astype("uint8")
        for out_idx, mrow in enumerate(grayscale_img):
            for in_idx, mcol in enumerate(mrow):
                if (out_idx, in_idx) not in all_positives_removed:
                    grayscale_img[out_idx][in_idx][0] = 0
                    grayscale_img[out_idx][in_idx][1] = 0
                    grayscale_img[out_idx][in_idx][2] = 0
        return grayscale_img
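An illustrative use of LimeCounterfactualImage; `predict_fn` is assumed to map a batch of uint8 HWC images to class probabilities, and `img` is one such image.

import matplotlib.pyplot as plt

lcf = LimeCounterfactualImage()
counterfactual = lcf.explain_instance(img, predict_fn, step_size=500)

# Pixels that had to be blacked out to flip the prediction keep their original values;
# everything else in the returned image is black.
plt.imshow(counterfactual)
plt.axis('off')
plt.show()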