def test_should_generate_ponderated_output(mocker):
    """generate_ponderated_output returns one ponderated map per (output, grads) pair."""
    # Stub out the per-pair ponderation so each call yields a known sentinel.
    mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.ponderate_output",
        side_effect=[
            mocker.sentinel.ponderated_1,
            mocker.sentinel.ponderated_2,
        ],
    )

    feature_maps = [mocker.sentinel.output_1, mocker.sentinel.output_2]
    gradients = [mocker.sentinel.grads_1, mocker.sentinel.grads_2]
    wanted = [mocker.sentinel.ponderated_1, mocker.sentinel.ponderated_2]

    produced = GradCAM.generate_ponderated_output(feature_maps, gradients)

    # Each produced element should be the sentinel the stub emitted for that pair.
    for got, want in zip(produced, wanted):
        assert got == want
def explain(self, model_input, model, layer_name, class_index, colormap=cv2.COLORMAP_INFERNO):
    """
    Compute GradCAM heatmaps for a specific class index.

    Args:
        model_input (tf.tensor): Data to perform the evaluation on.
        model (tf.keras.Model): tf.keras model to inspect.
        layer_name (str): Targeted layer for GradCAM.
        class_index (int, None): Index of targeted class.
        colormap (int): OpenCV colormap id; forwarded to heatmap_display
            when rendering each cam.

    Returns:
        tuple: (heatmaps, predictions) — the rendered GradCAM heatmaps
        (one per input sample) and the model predictions obtained while
        computing the gradients.
    """
    outputs, guided_grads, predictions = FEGradCAM.get_gradients_and_filters(
        model, model_input, layer_name, class_index)

    cams = GradCAM.generate_ponderated_output(outputs, guided_grads)

    input_min = tf.reduce_min(model_input)
    input_max = tf.reduce_max(model_input)
    # Need to move input image into the 0-255 range.
    # NOTE(review): this assumes the input lies in [-1, 1], [0, 1], or
    # already [0, 255] — TODO confirm against callers:
    #   min < 0  -> shift by +1 and halve (maps [-1, 1] onto [0, 1])
    #   max <= 1 -> scale by 255 (maps [0, 1] onto [0, 255])
    adjust_sum = 0.0
    adjust_factor = 1.0
    if input_min < 0:
        adjust_sum = 1.0
        adjust_factor /= 2.0
    if input_max <= 1:
        adjust_factor *= 255.0

    # Render one heatmap per (cam, input sample) pair, with the sample
    # rescaled into the 0-255 range expected by heatmap_display.
    heatmaps = [
        heatmap_display(cam.numpy(), (inp.numpy() + adjust_sum) * adjust_factor, colormap)
        for cam, inp in zip(cams, model_input)
    ]

    return heatmaps, predictions