def test_upsample_cam_1d(self):
    """Ensures correct output from _upsample_cam.

    In this case the CAM (class-activation map) is 1-D.
    """

    # `+ 0.` forces a copy so the module-level constant cannot be mutated.
    target_dimensions = numpy.array(CLASS_ACTIV_MATRIX_1D.shape, dtype=int)
    this_matrix = gradcam._upsample_cam(
        class_activation_matrix=CLASS_ACTIV_MATRIX_COARSE_1D + 0.,
        new_dimensions=target_dimensions
    )

    self.assertTrue(numpy.allclose(
        this_matrix, CLASS_ACTIV_MATRIX_1D, atol=TOLERANCE
    ))
def run_gradcam(model_object, predictor_matrix, activation_layer_name,
                vector_output_layer_name, output_neuron_indices,
                ideal_activation=DEFAULT_IDEAL_ACTIVATION):
    """Runs the Grad-CAM algorithm.

    H = number of heights
    P = number of predictor variables

    :param model_object: Trained model (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param predictor_matrix: H-by-P numpy array of predictor values.
    :param activation_layer_name: See doc for `check_metadata`.
    :param vector_output_layer_name: Same.
    :param output_neuron_indices: Same.
    :param ideal_activation: same.
    :return: class_activations: length-H numpy array of class activations.
    """

    # TODO(thunderhoser): Eventually make this work for dense (scalar) output
    # layers as well.

    # Check input args.
    error_checking.assert_is_numpy_array_without_nan(predictor_matrix)
    error_checking.assert_is_numpy_array(predictor_matrix, num_dimensions=2)

    # Add a leading batch axis (size 1), as the model expects batched input.
    predictor_matrix = numpy.expand_dims(predictor_matrix, axis=0)

    check_metadata(
        activation_layer_name=activation_layer_name,
        vector_output_layer_name=vector_output_layer_name,
        output_neuron_indices=output_neuron_indices,
        ideal_activation=ideal_activation
    )

    # Set up loss function: the activation of one output neuron.
    output_layer_object = model_object.get_layer(name=vector_output_layer_name)
    output_tensor = output_layer_object.output[
        :, output_neuron_indices[0], output_neuron_indices[1]
    ]

    # TODO(thunderhoser): Is this right?
    # loss_tensor = (output_tensor - ideal_activation) ** 2
    loss_tensor = output_tensor

    # Set up gradient function: d(loss) / d(activation-layer output).
    layer_activation_tensor = model_object.get_layer(
        name=activation_layer_name
    ).output

    gradient_tensor = K.gradients(loss_tensor, [layer_activation_tensor])[0]
    gradient_tensor = _normalize_tensor(gradient_tensor)
    gradient_function = K.function(
        [model_object.input],
        [layer_activation_tensor, gradient_tensor]
    )

    # Evaluate gradient function, then strip the batch axis.
    layer_activation_matrix, gradient_matrix = gradient_function(
        [predictor_matrix]
    )
    layer_activation_matrix = layer_activation_matrix[0, ...]
    gradient_matrix = gradient_matrix[0, ...]

    # Compute class-activation map in activation layer's space: each filter's
    # activation profile, weighted by the filter's mean gradient.
    mean_weight_by_filter = numpy.mean(gradient_matrix, axis=0)
    class_activations = numpy.full(layer_activation_matrix.shape[0], 0.)

    for this_filter_index, this_weight in enumerate(mean_weight_by_filter):
        class_activations += (
            this_weight * layer_activation_matrix[:, this_filter_index]
        )

    # Upsample CAM from the activation layer's resolution to the input's
    # height resolution.
    num_input_heights = predictor_matrix.shape[1]
    class_activation_matrix = gradcam_utils._upsample_cam(
        class_activation_matrix=class_activations,
        new_dimensions=numpy.array([num_input_heights], dtype=int)
    )

    # ReLU: keep only class activations that push the output up.
    return numpy.maximum(class_activation_matrix, 0.)