def test__call__if_penultimate_layer_is_no_exist_name(model):
    scorecam = ScoreCAM(model)
    with pytest.raises(ValueError):
        scorecam(CategoricalScore(1, 2),
                 np.random.sample((1, 8, 8, 3)),
                 penultimate_layer='hoge',
                 max_N=3)
def test__call__if_expand_cam_is_False(model):
    scorecam = ScoreCAM(model)
    result = scorecam(CategoricalScore(1, 2),
                      np.random.sample((1, 8, 8, 3)),
                      expand_cam=False,
                      max_N=3)
    assert result.shape == (1, 6, 6)
def test__call__if_penultimate_layer_is_None(model):
    scorecam = ScoreCAM(model)
    result = scorecam(CategoricalScore(1, 2),
                      np.random.sample((1, 8, 8, 3)),
                      penultimate_layer=None,
                      max_N=3)
    assert result.shape == (1, 8, 8)
def Explain_tfkerasvis(self, X, model, mostCommonIndex):
    """
    Generate GradCAM++ images, Saliency Maps and ScoreCAM images and return them.
    Seems to work very well, but it does not generate channel-specific saliency maps.
    """
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib import cm
    import tensorflow as tf
    from tf_keras_vis.utils import num_of_gpus, normalize
    from tf_keras_vis.gradcam import GradcamPlusPlus, Gradcam
    from tf_keras_vis.saliency import Saliency
    from tf_keras_vis.scorecam import ScoreCAM

    # The `output` variable refers to the output of the model,
    # so, in this case, `output` shape is (samples, classes).
    def loss(output):
        # mostCommonIndex is the class index determined from the inference.
        # It must correspond to the true label, or else the map will be wrong.
        return output[0][mostCommonIndex]

    def model_modifier(model):
        model.layers[-1].activation = tf.keras.activations.linear
        return model

    # Make prediction
    output = model.predict(X)

    # Create GradcamPlusPlus object
    gradcamplusplus = GradcamPlusPlus(model, model_modifier=model_modifier, clone=False)
    # Generate heatmap with GradCAM++ and normalize it
    camplusplus = gradcamplusplus(loss, X, penultimate_layer=-1)  # model.layers number
    camplusplus = normalize(camplusplus)
    # Create an RGB heatmap (cm.jet returns RGBA; keep the first 3 channels)
    heatmap_GradCamPlusPlus = np.uint8(cm.jet(camplusplus[0, :, :])[..., :3] * 255)

    # Create Saliency object.
    saliency = Saliency(model, model_modifier=model_modifier, clone=False)
    # Generate saliency map with SmoothGrad, which reduces noise by adding noise
    saliency_map = saliency(loss,
                            X,
                            smooth_samples=20,  # The number of gradient-calculation iterations.
                            smooth_noise=0.20)  # Noise spread level.
    # Normalize the map and drop the batch dimension
    saliency_map = normalize(saliency_map)
    saliency_map = saliency_map[0, :, :]

    # Create ScoreCAM object
    scorecam = ScoreCAM(model, model_modifier, clone=False)
    # This step takes a lot of time on CPU; prefer GPU, but it is doable on CPU.
    # Generate heatmap with ScoreCAM
    cam_score = scorecam(loss, X, penultimate_layer=-1)  # model.layers number
    cam_score = normalize(cam_score)
    # Create heatmap
    heatmap_CamScore = np.uint8(cm.jet(cam_score[0, :, :])[..., :3] * 255)

    # Return data
    return heatmap_GradCamPlusPlus, saliency_map, heatmap_CamScore
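# A minimal usage sketch for Explain_tfkerasvis above. It assumes a hypothetical
# `explainer` object that owns the method, a trained Keras `model`, a preprocessed
# input batch `X` of shape (1, H, W, C), and the predicted class index
# `mostCommonIndex`; the three returned maps are simply shown side by side.
from matplotlib import pyplot as plt

heatmap_pp, saliency_map, heatmap_score = explainer.Explain_tfkerasvis(X, model, mostCommonIndex)

f, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].imshow(heatmap_pp)                # uint8 RGB heatmap (GradCAM++)
axes[1].imshow(saliency_map, cmap='jet')  # float map (SmoothGrad saliency)
axes[2].imshow(heatmap_score)             # uint8 RGB heatmap (ScoreCAM)
for ax, title in zip(axes, ['GradCAM++', 'Saliency', 'ScoreCAM']):
    ax.set_title(title)
    ax.axis('off')
plt.tight_layout()
plt.show()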
def test__call__if_seed_input_shape_is_invalid(model):
    scorecam = ScoreCAM(model)
    try:
        scorecam(CategoricalScore(1, 2), np.random.sample((8, )))
        assert False
    except (ValueError, tf.errors.InvalidArgumentError):
        # TF raises InvalidArgumentError here since ver.2.0.2.
        assert True
def test__call__if_expand_cam_is_False_and_model_has_multiple_inputs(multiple_inputs_model):
    scorecam = ScoreCAM(multiple_inputs_model)
    result = scorecam(
        CategoricalScore(1, 2),
        [np.random.sample((1, 8, 8, 3)),
         np.random.sample((1, 10, 10, 3))],
        expand_cam=False,
        max_N=3)
    assert result.shape == (1, 8, 8)
def test__call__if_model_has_multiple_io(multiple_io_model):
    scorecam = ScoreCAM(multiple_io_model)
    result = scorecam(
        [CategoricalScore(1, 2), lambda x: x],
        [np.random.sample((1, 8, 8, 3)),
         np.random.sample((1, 10, 10, 3))],
        max_N=3)
    assert len(result) == 2
    assert result[0].shape == (1, 8, 8)
    assert result[1].shape == (1, 10, 10)
def call_faster_scorecam(model, model_modifier, loss, pixel_array):
    # Create ScoreCAM object
    scorecam = ScoreCAM(model, model_modifier, clone=False)

    # Generate heatmap with Faster-ScoreCAM
    cam = scorecam(loss,
                   pixel_array,
                   penultimate_layer=-1,  # model.layers number
                   max_N=10)
    return normalize(cam)
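# A small sketch of how call_faster_scorecam above might be invoked. It assumes a
# trained Keras classifier `model`, a preprocessed batch `pixel_array` of shape
# (1, H, W, C), and a target class index `class_index` (all hypothetical names);
# the modifier and score definitions mirror the tf-keras-vis usage elsewhere in
# this file.
import tensorflow as tf

def model_modifier(m):
    # Replace the final softmax with a linear activation, as tf-keras-vis recommends.
    m.layers[-1].activation = tf.keras.activations.linear
    return m

def loss(output):
    # Score for the target class of the first (and only) sample in the batch.
    return output[0][class_index]

cam = call_faster_scorecam(model, model_modifier, loss, pixel_array)  # normalized, shape (1, H, W)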
def test__call__(model):
    scorecam = ScoreCAM(model)
    result = scorecam(CategoricalScore(1, 2), np.random.sample((1, 8, 8, 3)), max_N=3)
    assert result.shape == (1, 8, 8)
def test__call__if_seed_input_has_not_batch_dim(model):
    scorecam = ScoreCAM(model)
    result = scorecam(CategoricalScore(1, 2), np.random.sample((8, 8, 3)), max_N=3)
    assert result.shape == (1, 8, 8)
def test__call__if_seed_input_is_None(model):
    scorecam = ScoreCAM(model)
    with pytest.raises(ValueError):
        scorecam(CategoricalScore(1, 2), None, max_N=3)
def test__call__if_loss_is_None(model):
    scorecam = ScoreCAM(model)
    with pytest.raises(ValueError):
        scorecam(None, None, max_N=3)
def test__call__if_model_has_multiple_outputs(multiple_outputs_model):
    scorecam = ScoreCAM(multiple_outputs_model)
    result = scorecam([CategoricalScore(1, 2), lambda x: x],
                      np.random.sample((1, 8, 8, 3)),
                      max_N=3)
    assert result.shape == (1, 8, 8)
def test__call__if_model_has_only_dense_layer(dense_model):
    scorecam = ScoreCAM(dense_model)
    with pytest.raises(ValueError):
        scorecam(CategoricalScore(1, 2), np.random.sample((1, 3)))
          cimg,
          penultimate_layer=5,  # model.layers number
          )
cam = normalize(cam)
f, ax = plt.subplots(**subplot_args)
ax.imshow(cimg.squeeze(), cmap='gray')
ax.imshow(cam.squeeze(), cmap='jet', alpha=0.5)  # overlay
plt.tight_layout()
plt.show()

# ScoreCAM
from tf_keras_vis.scorecam import ScoreCAM

scorecam = ScoreCAM(model, model_modifier, clone=True)
cam = scorecam(loss,
               cimg,
               penultimate_layer=-1,  # model.layers number
               )
cam = normalize(cam)
f, ax = plt.subplots(**subplot_args)
# heatmap = np.uint8(cm.jet(cam)[..., :3] * 255)
ax.imshow(cimg.squeeze(), cmap='gray')
ax.imshow(cam.squeeze(), cmap='jet', alpha=0.5)  # overlay
plt.tight_layout()
plt.show()