def explain_image(self, model, data, labels):
        """Produce an occlusion-based attribution heatmap for one image.

        Runs DeepExplain's 'occlusion' method on the softmax-stripped
        model, then post-processes the raw attributions (BGR->RGB
        conversion, gamma correction, heatmap colouring).

        NOTE(review): ``labels`` is multiplied into the output tensor,
        presumably a one-hot mask selecting the target logit(s) --
        confirm against callers. ``self.window_shape`` / ``self.step``
        configure the occlusion sliding window.
        """
        with DeepExplain(session=K.get_session()) as de:
            stripped = iutils.keras.graph.model_wo_softmax(model)

            inp = stripped.layers[0].input
            out = stripped.layers[-1].output

            # Re-wrap as a fresh functional model and apply it to its own
            # input to obtain the target tensor DeepExplain attributes.
            wrapped = keras.models.Model(inputs=inp, outputs=out)
            target = wrapped(inp)

            raw = de.explain('occlusion',
                             target * labels,
                             inp,
                             data,
                             window_shape=self.window_shape,
                             step=self.step)
            # Occlusion can produce NaNs; zero them before visualisation.
            heat = np.nan_to_num(raw)

            heat = iutils.postprocess_images(heat,
                                             color_coding='BGRtoRGB',
                                             channels_first=False)
            heat = ivis.gamma(heat, minamp=0, gamma=0.95)
            heat = ivis.heatmap(heat)

            return heat[0]
    def explain_image_innvestigate(self, model, data):
        """Produce an attribution heatmap via an iNNvestigate analyzer.

        Builds the analyzer named by ``self.gradient_method`` on the
        softmax-stripped model, analyzes ``data``, and post-processes
        the result (BGR->RGB, gamma, heatmap colouring).

        Returns the heatmap for the first sample, or ``None`` when the
        model cannot be analyzed or the analysis fails for any reason.
        """
        import logging

        try:
            # Rebuild as a plain functional model and compile it; some
            # iNNvestigate analyzers require a compiled model even though
            # no training takes place.
            model = keras.models.Model(inputs=model.inputs,
                                       outputs=model.outputs)
            model.compile(optimizer="adam", loss="categorical_crossentropy")

            # Strip the final softmax: analyzers attribute w.r.t. logits.
            model_wo_sm = iutils.keras.graph.model_wo_softmax(model)

            analyzer = innvestigate.create_analyzer(self.gradient_method,
                                                    model_wo_sm)
            analysis = analyzer.analyze(data)
            analysis = iutils.postprocess_images(analysis,
                                                 color_coding='BGRtoRGB',
                                                 channels_first=False)

            analysis = ivis.gamma(analysis, minamp=0, gamma=0.95)
            analysis = ivis.heatmap(analysis)

            return analysis[0]

        except innvestigate.NotAnalyzeableModelException:
            # Expected for unsupported architectures: best-effort contract,
            # simply report "no heatmap available".
            return None

        except Exception:
            # Previously this swallowed every failure silently, hiding real
            # bugs. Keep the best-effort contract (return None) but record
            # the exception so failures are diagnosable.
            logging.getLogger(__name__).exception(
                "innvestigate analysis failed")
            return None
# ---- 示例#3 (Example 3) ----
    def explain_image_deepexplain(self, model, data, labels):
        """Produce an attribution heatmap with a configurable DeepExplain
        method.

        Uses ``self.lrp_method`` as the explanation method and forwards
        ``self.kwargs`` to ``de.explain``; the result is post-processed
        (BGR->RGB, gamma, heatmap colouring).

        NOTE(review): ``labels`` is multiplied into the output tensor,
        presumably a one-hot mask selecting the target class -- confirm.
        """
        with DeepExplain(session=K.get_session()) as de:
            stripped = iutils.keras.graph.model_wo_softmax(model)

            inp = stripped.layers[0].input
            out = stripped.layers[-1].output

            # Wrap and re-apply to obtain the tensor DeepExplain attributes.
            target = keras.models.Model(inputs=inp, outputs=out)(inp)

            raw = de.explain(self.lrp_method, target * labels,
                             inp, data, **self.kwargs)

            heat = iutils.postprocess_images(raw,
                                             color_coding='BGRtoRGB',
                                             channels_first=False)
            heat = ivis.gamma(heat, minamp=0, gamma=0.95)
            heat = ivis.heatmap(heat)

            return heat[0]
def postprocess(X, color_conversion, channels_first):
    """Return a post-processed copy of image batch *X*.

    Delegates to ``iutils.postprocess_images`` with the given colour
    conversion (e.g. 'BGRtoRGB') and channel-layout flag; the input
    array itself is left untouched.
    """
    return iutils.postprocess_images(X.copy(),
                                     color_coding=color_conversion,
                                     channels_first=channels_first)
# ---- 示例#5 (Example 5) ----
def image(X):
    """Render image batch *X* as a gray-scale map.

    Post-processes a copy of *X* first. NOTE: the misspelled keyword
    ``input_is_postive_only`` is the actual iNNvestigate API name and
    must not be "corrected".
    """
    processed = iutils.postprocess_images(X.copy())
    return ivis.graymap(processed, input_is_postive_only=True)
# ---- 示例#6 (Example 6) ----
def postprocess(X):
    """Return ``iutils.postprocess_images`` applied to a copy of *X*,
    leaving the input array untouched."""
    return iutils.postprocess_images(X.copy())