def _test_for_multiple_io(self, model):
    """Smoke-test Xcam on a model with two inputs and two outputs."""
    visualizer = Xcam(model)
    score_list = [CategoricalScore(0), CategoricalScore(0)]
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    cams = visualizer(score_list, seed_inputs)
    # One CAM per input, each matching its input's spatial resolution.
    assert cams[0].shape == (1, 8, 8)
    assert cams[1].shape == (1, 10, 10)
def test__call__if_score_is_(self, scores, expected_error, multiple_inputs_model):
    """Verify score-argument validation against a two-input model."""
    visualizer = Xcam(multiple_inputs_model)
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    with assert_raises(expected_error):
        cams = visualizer(scores, seed_inputs)
        # Shape checks run only for the NO_ERROR parametrized cases.
        assert len(cams) == 2
        assert cams[0].shape == (1, 8, 8)
        assert cams[1].shape == (1, 10, 10)
class TestScale():
    @pytest.mark.parametrize(
        "seed_input",
        [dummy_sample((1, 8, 8, 3)),
         tf.constant(dummy_sample((1, 8, 8, 3)))])
    def test__call__(self, seed_input):
        """Scale must preserve the input's shape for both ndarray and tensor."""
        scaled = Scale()(seed_input)
        assert scaled.shape == seed_input.shape
def _test_for_multiple_io(self, model):
    """Smoke-test Saliency on a model with two inputs and two outputs."""
    visualizer = Saliency(model)
    score_list = [CategoricalScore(0), BinaryScore(0)]
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    maps = visualizer(score_list, seed_inputs)
    # One saliency map per input, each at its input's spatial resolution.
    assert len(maps) == 2
    assert maps[0].shape == (1, 8, 8)
    assert maps[1].shape == (1, 10, 10)
def test__call__if_smoothing_is_active(self, smooth_samples, multiple_inputs_model):
    """The smooth_samples path must keep per-input map shapes unchanged."""
    visualizer = Saliency(multiple_inputs_model)
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    maps = visualizer(CategoricalScore(0),
                      seed_inputs,
                      smooth_samples=smooth_samples)
    assert len(maps) == 2
    assert maps[0].shape == (1, 8, 8)
    assert maps[1].shape == (1, 10, 10)
def test__call__if_keepdims_is_(self, keepdims, expected, multiple_inputs_model):
    """keepdims toggles the expected output shape for each input's map."""
    visualizer = Saliency(multiple_inputs_model)
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    maps = visualizer(CategoricalScore(0), seed_inputs, keepdims=keepdims)
    assert len(maps) == 2
    # Parametrized `expected` carries the per-input shapes for this keepdims.
    assert maps[0].shape == expected[0]
    assert maps[1].shape == expected[1]
def test__call__with_expand_cam(self, expand_cam, multiple_io_model):
    """expand_cam=True yields one resized map per input; False yields the raw CAM."""
    visualizer = Xcam(multiple_io_model)
    seed_inputs = [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))]
    cams = visualizer([CategoricalScore(0), BinaryScore(0)],
                      seed_inputs,
                      expand_cam=expand_cam)
    if expand_cam:
        assert cams[0].shape == (1, 8, 8)
        assert cams[1].shape == (1, 10, 10)
    else:
        assert cams.shape == (1, 8, 8)
class TestInactiveScore():
    @pytest.mark.parametrize("output,expected_shape,expected_error", [
        (dummy_sample((1, 1)), (1, 1), NO_ERROR),
        (dummy_sample((10, 5)), (10, 5), NO_ERROR),
        (dummy_sample((1, 224, 224, 3)), (1, 224, 224, 3), NO_ERROR),
    ])
    def test__call__(self, output, expected_shape, expected_error):
        """InactiveScore must zero every value while preserving the shape."""
        with assert_raises(expected_error):
            scored = InactiveScore()(output)
            assert np.all(scored == 0.0)
            assert scored.shape == expected_shape
class TestXcamWithMultipleOutputsModel():
    # CAM tests against a single-input, two-output model.

    @pytest.mark.parametrize("scores,expected_error", [
        (None, ValueError),
        ([None], ValueError),
        (CategoricalScore(0), ValueError),
        ([CategoricalScore(0)], ValueError),
        ([None, None], ValueError),
        ([CategoricalScore(0), None], ValueError),
        ([None, CategoricalScore(0)], ValueError),
        ([CategoricalScore(0), BinaryScore(0)], NO_ERROR),
        ([score_with_tuple, score_with_tuple], NO_ERROR),
        ([score_with_list, score_with_list], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_score_is_(self, scores, expected_error, multiple_outputs_model):
        """A two-output model requires one valid score per output."""
        cam = Xcam(multiple_outputs_model)
        with assert_raises(expected_error):
            result = cam(scores, dummy_sample((1, 8, 8, 3)))
            # Reached only in the NO_ERROR cases.
            assert result.shape == (1, 8, 8)

    @pytest.mark.parametrize("seed_input,expected,expected_error", [
        (None, None, ValueError),
        (dummy_sample((8, )), None, ValueError),
        (dummy_sample((8, 8, 3)), (1, 8, 8), NO_ERROR),
        ([dummy_sample((8, 8, 3))], [(1, 8, 8)], NO_ERROR),
        (dummy_sample((1, 8, 8, 3)), (1, 8, 8), NO_ERROR),
        ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_seed_input_is_(self, seed_input, expected, expected_error,
                                      multiple_outputs_model):
        """A list-typed seed input must produce a list-typed result."""
        cam = Xcam(multiple_outputs_model)
        with assert_raises(expected_error):
            result = cam([CategoricalScore(0), BinaryScore(0)], seed_input)
            if type(expected) is list:
                # Unwrap the single-element list to compare shapes uniformly.
                assert type(result) is list
                expected = expected[0]
                result = result[0]
            assert result.shape == expected

    @pytest.mark.parametrize("expand_cam", [False, True])
    @pytest.mark.usefixtures("xcam", "mixed_precision")
    def test__call__with_expand_cam(self, expand_cam, multiple_outputs_model):
        """expand_cam=False returns the raw (1, 6, 6) CAM; True resizes to the input."""
        cam = Xcam(multiple_outputs_model)
        result = cam([CategoricalScore(0), BinaryScore(0)], [dummy_sample((1, 8, 8, 3))],
                     expand_cam=expand_cam)
        if expand_cam:
            assert result[0].shape == (1, 8, 8)
        else:
            assert result.shape == (1, 6, 6)
def test__call__if_activation_modifier_is_(self, activation_modifier, conv_model):
    """GradcamPlusPlus accepts a custom activation_modifier without changing shape."""
    visualizer = GradcamPlusPlus(conv_model)
    heatmap = visualizer(CategoricalScore(0),
                         dummy_sample((1, 8, 8, 3)),
                         activation_modifier=activation_modifier)
    assert heatmap.shape == (1, 8, 8)
def test__call__with_expand_cam(self, expand_cam, conv_model):
    """expand_cam resizes the CAM to the input resolution; otherwise it stays 6x6."""
    visualizer = Xcam(conv_model)
    heatmap = visualizer(CategoricalScore(0), [dummy_sample((1, 8, 8, 3))],
                         expand_cam=expand_cam)
    if expand_cam:
        assert heatmap[0].shape == (1, 8, 8)
    else:
        assert heatmap.shape == (1, 6, 6)
def test__call__if_max_N_is_(self, max_N, expected_error, conv_model):
    """Scorecam validates its max_N argument; valid values keep the batch shape."""
    with assert_raises(expected_error):
        visualizer = Scorecam(conv_model)
        heatmap = visualizer(CategoricalScore(0),
                             dummy_sample((2, 8, 8, 3)),
                             max_N=max_N)
        assert heatmap.shape == (2, 8, 8)
def test__call__with_categorical_score(self, score_class, modifier_enabled, clone_enabled,
                                       batch_size, conv_model, conv_sigmoid_model):
    """End-to-end CAM run across score type, modifier, clone flag and batch size.

    Release v.0.6.0@dev(May 22 2021):
    Add this case to test Scorecam with CAM class.
    """
    def model_modifier(model):
        model.layers[-1].activation = tf.keras.activations.linear

    # BinaryScore pairs with the sigmoid-output model; CategoricalScore with softmax.
    target_model = conv_sigmoid_model if score_class is BinaryScore else conv_model
    n_samples = max(batch_size, 1)
    score = score_class(list(np.random.randint(0, 1, n_samples)))
    # batch_size == 0 exercises the unbatched (3-D) seed-input path.
    if batch_size > 0:
        seed_input = dummy_sample((batch_size, 8, 8, 3))
    else:
        seed_input = dummy_sample((8, 8, 3))
    cam = Xcam(target_model,
               model_modifier=model_modifier if modifier_enabled else None,
               clone=clone_enabled)
    result = cam(score, seed_input=seed_input)
    # Cloning only happens when a modifier is supplied AND clone=True.
    if modifier_enabled and clone_enabled:
        assert target_model is not cam.model
    else:
        assert target_model is cam.model
    assert result.shape == (n_samples, 8, 8)
class TestXcamWithMultipleInputsModel():
    # CAM tests against a two-input, single-output model.

    @pytest.mark.parametrize("scores,expected_error", [
        (None, ValueError),
        (CategoricalScore(0), NO_ERROR),
        (score_with_tuple, NO_ERROR),
        (score_with_list, NO_ERROR),
        ([None], ValueError),
        ([CategoricalScore(0)], NO_ERROR),
        ([score_with_tuple], NO_ERROR),
        ([score_with_list], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_score_is_(self, scores, expected_error, multiple_inputs_model):
        """A single valid score (bare or in a list) is accepted; None is rejected."""
        cam = Xcam(multiple_inputs_model)
        with assert_raises(expected_error):
            result = cam(scores, [dummy_sample((1, 8, 8, 3)),
                                  dummy_sample((1, 10, 10, 3))])
            # Reached only in the NO_ERROR cases: one CAM per input.
            assert len(result) == 2
            assert result[0].shape == (1, 8, 8)
            assert result[1].shape == (1, 10, 10)

    @pytest.mark.parametrize("seed_input,expected_error", [
        (None, ValueError),
        (dummy_sample((1, 8, 8, 3)), ValueError),
        ([dummy_sample((1, 8, 8, 3))], ValueError),
        ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_seed_input_is_(self, seed_input, expected_error,
                                      multiple_inputs_model):
        """A two-input model rejects anything but a two-element seed-input list."""
        cam = Xcam(multiple_inputs_model)
        with assert_raises(expected_error):
            result = cam(CategoricalScore(0), seed_input)
            assert result[0].shape == (1, 8, 8)
            assert result[1].shape == (1, 10, 10)

    @pytest.mark.parametrize("expand_cam", [False, True])
    @pytest.mark.usefixtures("xcam", "mixed_precision")
    def test__call__with_expand_cam(self, expand_cam, multiple_inputs_model):
        """expand_cam=True resizes per input; False returns a single raw CAM."""
        cam = Xcam(multiple_inputs_model)
        result = cam(CategoricalScore(0),
                     [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],
                     expand_cam=expand_cam)
        if expand_cam:
            assert result[0].shape == (1, 8, 8)
            assert result[1].shape == (1, 10, 10)
        else:
            assert result.shape == (1, 8, 8)
class TestModelVisualization():
    # Exercises the visualizer base behavior through MockVisualizer:
    # constructor cloning semantics and score-value averaging.

    @pytest.mark.parametrize(
        "modifier,clone,expected_same,expected_activation",
        [
            (None, False, True, tf.keras.activations.softmax),
            (None, True, True, tf.keras.activations.softmax),
            ('not-return', False, True, tf.keras.activations.linear),
            ('not-return', True, False, tf.keras.activations.linear),
            ('return', False, True, tf.keras.activations.linear),
            ('return', True, False, tf.keras.activations.linear),
        ])
    @pytest.mark.usefixtures("mixed_precision")
    def test__init__(self, modifier, clone, expected_same, expected_activation, conv_model):
        """The model is only cloned when a modifier is given AND clone=True."""
        # NOTE(review): the 'return' and 'not-return' branches are identical;
        # presumably one was meant to use a modifier that returns the model
        # while the other modifies in place — confirm the intended difference.
        if modifier == 'return':
            mock = MockVisualizer(conv_model, model_modifier=ReplaceToLinear(), clone=clone)
        elif modifier == 'not-return':
            mock = MockVisualizer(conv_model, model_modifier=ReplaceToLinear(), clone=clone)
        else:
            mock = MockVisualizer(conv_model, clone=clone)
        assert (mock.model is conv_model) == expected_same
        assert mock.model.layers[-1].activation == expected_activation
        # Whether cloned or not, the weights must match the original model's.
        assert np.array_equal(mock.model.get_weights()[0], conv_model.get_weights()[0])

    @pytest.mark.parametrize("score,expected_shape", [
        (dummy_sample((2, 32, 32, 3)), (2, )),
        ((dummy_sample((32, 32, 3)), dummy_sample((32, 32, 3))), (2, )),
        ([dummy_sample(
            (32, 32, 3)), dummy_sample((32, 32, 3))], (2, )),
        (tf.constant(dummy_sample((2, 32, 32, 3))), (2, )),
        ((tf.constant(dummy_sample(
            (32, 32, 3))), tf.constant(dummy_sample((32, 32, 3)))), (2, )),
        ([
            tf.constant(dummy_sample((32, 32, 3))),
            tf.constant(dummy_sample((32, 32, 3)))
        ], (2, )),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test_mean_score_value(self, score, expected_shape, conv_model):
        """_mean_score_value collapses each sample's score to one value per sample,
        for batched arrays/tensors as well as tuples/lists of per-sample values."""
        actual = MockVisualizer(conv_model)._mean_score_value(score)
        assert actual.shape == expected_shape
def test__call__(self, conv_model):
    """GuidedBackpropagation swaps the conv layer's activation, not the dense one's."""
    visualizer = Saliency(conv_model, model_modifier=GuidedBackpropagation())
    guided = visualizer.model
    assert guided != conv_model
    # Only the convolutional activation is replaced by the modifier.
    assert guided.get_layer('conv_1').activation != conv_model.get_layer(
        'conv_1').activation
    assert guided.get_layer('dense_1').activation == conv_model.get_layer(
        'dense_1').activation
    # Must also run end-to-end without raising.
    visualizer(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
def test__call__if_penultimate_layer_is(self, penultimate_layer, seek_penultimate_conv_layer,
                                        expected_error, conv_model):
    """Validates the penultimate-layer selection options of Xcam."""
    visualizer = Xcam(conv_model)
    with assert_raises(expected_error):
        heatmap = visualizer(CategoricalScore(0),
                             dummy_sample((1, 8, 8, 3)),
                             penultimate_layer=penultimate_layer,
                             seek_penultimate_conv_layer=seek_penultimate_conv_layer)
        assert heatmap.shape == (1, 8, 8)
def test__call__(self, indices, output_shape, expected_error):
    """CategoricalScore validates indices against the model-output tensor."""
    model_output = tf.constant(dummy_sample(output_shape), tf.float32)
    scorer = CategoricalScore(indices)
    with assert_raises(expected_error):
        values = scorer(model_output)
        # One score per sample, i.e. the batch dimension only.
        assert values.shape == output_shape[0:1]
def _test_for_single_io(self, model):
    """Smoke-test Saliency on a single-input, single-output model."""
    saliency_map = Saliency(model)(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
    assert saliency_map.shape == (1, 8, 8)
def test__call__if_score_is_(self, scores, expected_error, conv_model):
    """Score-argument validation for a single-output convolutional model."""
    visualizer = Xcam(conv_model)
    with assert_raises(expected_error):
        heatmap = visualizer(scores, dummy_sample((1, 8, 8, 3)))
        # Shape check runs only for the NO_ERROR parametrized cases.
        assert heatmap.shape == (1, 8, 8)
def test__call__if_keepdims_is_(self, keepdims, expected, conv_model):
    """keepdims toggles the saliency map's expected output shape."""
    saliency_map = Saliency(conv_model)(CategoricalScore(0),
                                        dummy_sample((1, 8, 8, 3)),
                                        keepdims=keepdims)
    assert saliency_map.shape == expected
class TestXcam():
    # Core CAM tests against a single-input, single-output conv model.

    @pytest.mark.parametrize("scores,expected_error", [
        (None, ValueError),
        (CategoricalScore(0), NO_ERROR),
        (score_with_tuple, NO_ERROR),
        (score_with_list, NO_ERROR),
        (score_with_tensor, NO_ERROR),
        ([None], ValueError),
        ([CategoricalScore(0)], NO_ERROR),
        ([score_with_tuple], NO_ERROR),
        ([score_with_list], NO_ERROR),
        ([score_with_tensor], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_score_is_(self, scores, expected_error, conv_model):
        """Any valid score form (bare or in a list) is accepted; None is rejected."""
        cam = Xcam(conv_model)
        with assert_raises(expected_error):
            result = cam(scores, dummy_sample((1, 8, 8, 3)))
            # Reached only in the NO_ERROR cases.
            assert result.shape == (1, 8, 8)

    @pytest.mark.parametrize("seed_input,expected,expected_error", [
        (None, None, ValueError),
        (dummy_sample((8, )), None, ValueError),
        (dummy_sample((8, 8, 3)), (1, 8, 8), NO_ERROR),
        ([dummy_sample((8, 8, 3))], [(1, 8, 8)], NO_ERROR),
        (dummy_sample((1, 8, 8, 3)), (1, 8, 8), NO_ERROR),
        ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], NO_ERROR),
    ])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__if_seed_input_is_(self, seed_input, expected, expected_error, conv_model):
        """A list-typed seed input must produce a list-typed result."""
        cam = Xcam(conv_model)
        with assert_raises(expected_error):
            result = cam(CategoricalScore(0), seed_input)
            if type(expected) is list:
                # Unwrap the single-element list to compare shapes uniformly.
                assert type(result) is list
                expected = expected[0]
                result = result[0]
            assert result.shape == expected

    @pytest.mark.parametrize("penultimate_layer,seek_penultimate_conv_layer,expected_error", [
        (None, True, NO_ERROR),
        (-1, True, NO_ERROR),
        (-1.0, True, ValueError),
        ('dense_1', True, NO_ERROR),
        ('dense_1', False, ValueError),
        (1, False, NO_ERROR),
        (1, True, NO_ERROR),
        ('conv_1', True, NO_ERROR),
        (0, True, ValueError),
        ('input_1', True, ValueError),
        (CategoricalScore(0), True, ValueError),
        (mock_conv_model().layers[-1], False, ValueError),
    ])
    @pytest.mark.usefixtures("xcam", "mixed_precision")
    def test__call__if_penultimate_layer_is(self, penultimate_layer, seek_penultimate_conv_layer,
                                            expected_error, conv_model):
        """Validates penultimate-layer selection by index, name, layer object, etc."""
        cam = Xcam(conv_model)
        with assert_raises(expected_error):
            result = cam(CategoricalScore(0),
                         dummy_sample((1, 8, 8, 3)),
                         penultimate_layer=penultimate_layer,
                         seek_penultimate_conv_layer=seek_penultimate_conv_layer)
            assert result.shape == (1, 8, 8)

    @pytest.mark.usefixtures("xcam", "mixed_precision")
    def test__call__if_expand_cam_is_False(self, conv_model):
        """Without expansion, the raw CAM stays at the conv feature-map size."""
        cam = Xcam(conv_model)
        result = cam(CategoricalScore(0), dummy_sample((1, 8, 8, 3)), expand_cam=False)
        assert result.shape == (1, 6, 6)

    @pytest.mark.parametrize("score_class", [BinaryScore, CategoricalScore])
    @pytest.mark.parametrize("modifier_enabled", [False, True])
    @pytest.mark.parametrize("clone_enabled", [False, True])
    @pytest.mark.parametrize("batch_size", [0, 1, 5])
    @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
    def test__call__with_categorical_score(self, score_class, modifier_enabled, clone_enabled,
                                           batch_size, conv_model, conv_sigmoid_model):
        """End-to-end CAM run across score type, modifier, clone flag and batch size."""
        # Release v.0.6.0@dev(May 22 2021):
        # Add this case to test Scorecam with CAM class.
        def model_modifier(model):
            model.layers[-1].activation = tf.keras.activations.linear

        # BinaryScore pairs with the sigmoid model; CategoricalScore with softmax.
        if score_class is BinaryScore:
            model = conv_sigmoid_model
        else:
            model = conv_model
        score_targets = np.random.randint(0, 1, max(batch_size, 1))
        score = score_class(list(score_targets))
        # batch_size == 0 exercises the unbatched (3-D) seed-input path.
        seed_input_shape = (8, 8, 3)
        if batch_size > 0:
            seed_input_shape = (batch_size, ) + seed_input_shape
        seed_input = dummy_sample(seed_input_shape)
        cam = Xcam(model,
                   model_modifier=model_modifier if modifier_enabled else None,
                   clone=clone_enabled)
        result = cam(score, seed_input=seed_input)
        # Cloning only happens when a modifier is supplied AND clone=True.
        if modifier_enabled and clone_enabled:
            assert model is not cam.model
        else:
            assert model is cam.model
        assert result.shape == (max(batch_size, 1), 8, 8)

    @pytest.mark.parametrize("expand_cam", [False, True])
    @pytest.mark.usefixtures("xcam", "mixed_precision")
    def test__call__with_expand_cam(self, expand_cam, conv_model):
        """expand_cam=True resizes the CAM to the input; False keeps it raw."""
        cam = Xcam(conv_model)
        result = cam(CategoricalScore(0), [dummy_sample((1, 8, 8, 3))],
                     expand_cam=expand_cam)
        if expand_cam:
            assert result[0].shape == (1, 8, 8)
        else:
            assert result.shape == (1, 6, 6)
def _test_for_single_io(self, model):
    """Smoke-test Xcam on a single-input, single-output model."""
    heatmap = Xcam(model)(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
    assert heatmap.shape == (1, 8, 8)
def test__call__(self, dense_model):
    """Xcam must reject a model that has no convolutional layers."""
    visualizer = Xcam(dense_model)
    with assert_raises(ValueError):
        heatmap = visualizer(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
        # Unreachable: the call above is expected to raise.
        assert heatmap.shape == (1, 8, 8)
def test__call__if_smoothing_is_active(self, smooth_samples, conv_model):
    """The smooth_samples (SmoothGrad) path keeps the saliency-map shape."""
    saliency_map = Saliency(conv_model)(CategoricalScore(0),
                                        dummy_sample((1, 8, 8, 3)),
                                        smooth_samples=smooth_samples)
    assert saliency_map.shape == (1, 8, 8)
def test__call__if_model_has_only_dense_layers(self, dense_model):
    """Saliency also works on a dense-only model with a flat input."""
    saliency_map = Saliency(dense_model)(CategoricalScore(0),
                                         dummy_sample((8, )),
                                         keepdims=True)
    assert saliency_map.shape == (1, 8)
def test__call__if_normalize_gradient_is_True(self, conv_model):
    """Gradcam with normalize_gradient=True keeps the output shape."""
    heatmap = Gradcam(conv_model)(CategoricalScore(0),
                                  dummy_sample((1, 8, 8, 3)),
                                  normalize_gradient=True)
    assert heatmap.shape == (1, 8, 8)
def test__call__if_expand_cam_is_False(self, conv_model):
    """Without expansion, the CAM stays at the conv feature-map resolution."""
    heatmap = Xcam(conv_model)(CategoricalScore(0),
                               dummy_sample((1, 8, 8, 3)),
                               expand_cam=False)
    assert heatmap.shape == (1, 6, 6)