示例#1
0
    def image_predict():
        """Predict class probabilities for the image stored in the session.

        Loads "<image_id>.png" from the temporary directory, preprocesses it
        for the model, and returns the model's per-class scores as a JSON
        list of floats.

        :return: JSON response containing one float per class
        """
        image_path = os.path.join(tmp_dir, session["image_id"] + ".png")
        image = prepare_for_prediction(model, image_path)

        # predict() returns a batch; [0] takes the scores for our single image.
        label_onehot = model.predict(image)[0]
        # float() unboxes numpy scalars so they are JSON-serializable.
        # (Dropped the redundant list() wrapper around the iterable.)
        result = [float(x) for x in label_onehot]
        return jsonify(result)
示例#2
0
    def explain(self, model, image_path):
        """Explain the model's top prediction for *image_path* with LIME.

        Predicts the most likely label, runs LimeImageExplainer on the
        preprocessed (un-batched) image, and overlays the selected
        superpixel boundaries on the image.

        :param model: model whose prediction is explained
        :param image_path: path of the image to explain
        :return: dict with "label_id" (int) and "image" (base64-encoded image)
        """
        image = prepare_for_prediction(model, image_path, expand_dims=False)

        # Predict on a batch of one; argmax picks the label to explain.
        label_onehot = model.predict(tf.expand_dims(image, 0))[0]
        label = tf.math.argmax(label_onehot)

        # LIME expects a float64 numpy image.
        image = tf.cast(image, dtype=tf.float64)
        explainer = lime_image.LimeImageExplainer()
        explanation = explainer.explain_instance(image.numpy(),
                                                 model.predict,
                                                 top_labels=5,
                                                 hide_color=0,
                                                 num_samples=500)
        temp, mask = explanation.get_image_and_mask(label.numpy(),
                                                    positive_only=True,
                                                    num_features=5,
                                                    hide_rest=True)

        # Draw superpixel boundaries; rescaling assumes pixels in [-1, 1]
        # (maps them to [0, 1]) — TODO confirm against prepare_for_prediction.
        result_image = mark_boundaries(temp / 2 + 0.5, mask)
        result_image = tf.image.convert_image_dtype(result_image,
                                                    dtype=tf.uint8,
                                                    saturate=True)

        image_base64 = get_base64png(result_image)
        # int(label) for consistency with the other explainers, which all
        # serialize label_id as an int (this one previously used float).
        result = {"label_id": int(label), "image": image_base64}
        return result
示例#3
0
    def explain(self,
                model: tf.keras.Model,
                image_path: str,
                general_settings: GeneralSettings,
                tool_settings: dict = None) -> list:
        """
        Explain prediction for given model and image. Each resulting dictionary contains keys:
        - label_id: int
        - probability: float
        - image: base64 encoded image

        Method is implemented for cases, when explaining is done one label after another.
        Provide content of explain_one()!
        Otherwise overwrite this method and you can ignore explain_one() method.

        :param model: model whose prediction we would like to explain
        :param image_path: image to be explained
        :param general_settings: common setup containing settings for every tool (number of images etc.)
        :param tool_settings: values of a tool setup in the form of {parameter name: parameter value}
        :return: list of dictionaries, one explanation per label
        """
        image = prepare_for_prediction(model, image_path)
        onehot, labels = self._get_labels(model, image, general_settings)

        # Treat a missing tool setup as "no overrides".
        tool_settings = tool_settings if tool_settings is not None else {}
        # One explanation per requested label (comprehension instead of
        # the previous map()+lambda).
        return [
            {
                "label_id": int(label),
                "probability": float(onehot[label]),
                "image": self.explain_one(model, image_path, label,
                                          tool_settings),
            }
            for label in labels
        ]
    def _explain(self, model, image_path, explain_class):
        """Run *explain_class* on the model's most likely label for the image.

        :param model: model whose prediction is explained
        :param image_path: path of the image to explain
        :param explain_class: explainer type; instantiated and called with
            the image, the model and the predicted class index
        :return: dict with "label_id" (int) and "image" (base64-encoded image)
        """
        prepared = prepare_for_prediction(model, image_path)

        # Pick the class the model itself considers most likely.
        scores = model.predict(prepared)[0]
        predicted = tf.math.argmax(scores)

        explainer = explain_class()
        visualization = explainer.explain((prepared.numpy(), None),
                                          model,
                                          class_index=predicted.numpy())

        encoded = get_base64png(visualization)
        return {"label_id": int(predicted), "image": encoded}
示例#5
0
    def explain(self,
                model: tf.keras.Model,
                image_path: str,
                general_settings: GeneralSettings,
                tool_settings: dict = None) -> list:
        """Explain the selected labels for *image_path* using LIME.

        Runs LimeImageExplainer once, then renders one boundary-marked
        image per label returned by ``_get_labels``.

        :param model: model whose prediction is explained
        :param image_path: path of the image to explain
        :param general_settings: common setup shared by every tool
        :param tool_settings: LIME parameters keyed by name (batch_size,
            num_features, num_samples, positive_only, superpixels,
            min_weight, hide_rest) — required here, no defaults are applied
        :return: list of dicts with "label_id", "probability" and "image"
        """
        prepared = prepare_for_prediction(model, image_path)
        onehot, labels = self._get_labels(model, prepared, general_settings)

        # LIME works on a single float64 numpy image (strip the batch dim).
        as_float = tf.cast(prepared, dtype=tf.float64)
        explanation = lime_image.LimeImageExplainer().explain_instance(
            as_float.numpy()[0],
            model.predict,
            labels=labels,
            batch_size=tool_settings["batch_size"],
            num_features=tool_settings["num_features"],
            num_samples=tool_settings["num_samples"],
        )

        results = []
        for label in labels:
            segment, segment_mask = explanation.get_image_and_mask(
                label,
                positive_only=tool_settings["positive_only"],
                num_features=tool_settings["superpixels"],
                min_weight=tool_settings["min_weight"],
                hide_rest=tool_settings["hide_rest"],
            )

            # Overlay superpixel boundaries; rescaling assumes pixels in
            # [-1, 1] — TODO confirm against prepare_for_prediction.
            rendered = mark_boundaries(segment / 2 + 0.5, segment_mask)
            rendered = tf.image.convert_image_dtype(rendered,
                                                    dtype=tf.uint8,
                                                    saturate=True)

            results.append({
                "label_id": int(label),
                "probability": float(onehot[label]),
                "image": get_base64png(rendered),
            })

        return results
    def explain_one(self, model, image_path, label, tool_settings):
        """Explain a single *label* using this tool's explain_class.

        :param model: model whose prediction is explained
        :param image_path: path of the image to explain
        :param label: class index to explain
        :param tool_settings: extra keyword arguments forwarded to explain()
        :return: base64-encoded explanation image
        """
        prepared = prepare_for_prediction(model, image_path)
        explainer = self.explain_class()
        visualization = explainer.explain(
            (prepared.numpy(), None),
            model,
            class_index=label,
            **tool_settings,
        )
        return get_base64png(visualization)