def test__call__with_mutiple_outputs_model_but_losses_is_too_many(multiple_outputs_model):
    """Passing more losses than the model has outputs must raise ValueError."""
    visualizer = ActivationMaximization(multiple_outputs_model)
    surplus_losses = [SmoothedLoss(1), SmoothedLoss(1), SmoothedLoss(1)]
    raised = False
    try:
        visualizer(surplus_losses, steps=1, input_modifiers=None)
    except ValueError:
        raised = True
    assert raised
def test__call__if_smoothing_is_active(cnn_model):
    """Saliency with smooth_samples >= 1 still yields a (1, 8, 8) map."""
    saliency = Saliency(cnn_model)
    for n_samples in (1, 2):
        seed = np.random.sample((1, 8, 8, 3))
        saliency_map = saliency(SmoothedLoss(1), seed, smooth_samples=n_samples)
        assert saliency_map.shape == (1, 8, 8)
def test__call__if_seed_input_is_None(cnn_model):
    """Gradcam must reject a missing seed input with ValueError."""
    # NOTE(review): a Saliency test with this exact name appears later in
    # this chunk; if both live in one module, only the last definition is
    # collected (flake8 F811) -- confirm they come from separate files.
    gradcam = Gradcam(cnn_model)
    raised = False
    try:
        gradcam(SmoothedLoss(1), None)
    except ValueError:
        raised = True
    assert raised
def test__call__with_mutiple_outputs_model(multiple_outputs_model):
    """Multi-output models keep the (1, 8, 8, 3) result shape.

    Covers a single loss, one loss per output, and losses combined with
    regularizers.
    """
    single = ActivationMaximization(multiple_outputs_model)
    generated = single(SmoothedLoss(1), steps=1, input_modifiers=None)
    assert generated.shape == (1, 8, 8, 3)

    paired = ActivationMaximization(multiple_outputs_model)
    generated = paired([SmoothedLoss(1), SmoothedLoss(1)],
                       steps=1,
                       input_modifiers=None)
    assert generated.shape == (1, 8, 8, 3)

    regularized = ActivationMaximization(multiple_outputs_model)
    generated = regularized([SmoothedLoss(1), SmoothedLoss(1)],
                            steps=1,
                            input_modifiers=None,
                            regularizers=[TotalVariation(10.), L2Norm(10.)])
    assert generated.shape == (1, 8, 8, 3)
def test__call__with_mutiple_inputs_model(multiple_inputs_model):
    """Each input of a multi-input model gets its own activation image."""
    visualizer = ActivationMaximization(multiple_inputs_model)
    images = visualizer(SmoothedLoss(1), steps=1, input_modifiers=None)
    for idx in range(2):
        assert images[idx].shape == (1, 8, 8, 3)
def test__call__if_seed_input_is_None(cnn_model):
    """Saliency must reject a missing seed input with ValueError."""
    # NOTE(review): duplicates the name of an earlier Gradcam test in this
    # chunk; harmless across modules, an F811 shadowing bug within one.
    saliency = Saliency(cnn_model)
    raised = False
    try:
        saliency(SmoothedLoss(1), None)
    except ValueError:
        raised = True
    assert raised
def test__call__with_callback(cnn_model):
    """All three callback hooks fire during an ActivationMaximization run."""
    visualizer = ActivationMaximization(cnn_model)
    callback = MockCallback()
    generated = visualizer(SmoothedLoss(1), steps=1, callbacks=callback)
    assert generated.shape == (1, 8, 8, 3)
    for was_called in (callback.on_begin_was_called,
                       callback.on_call_was_called,
                       callback.on_end_was_called):
        assert was_called
def test__call__if_penultimate_layer_is_noexist_name(cnn_model):
    """An unknown penultimate_layer name must raise ValueError."""
    gradcam = Gradcam(cnn_model)
    seed = np.random.sample((1, 8, 8, 3))
    raised = False
    try:
        gradcam(SmoothedLoss(1), seed, penultimate_layer='hoge')
    except ValueError:
        raised = True
    assert raised
def visualize_conv_layer_filters(model, layer_name):
    """Visualize maximally-activating inputs for random filters of a layer.

    Args:
        model: a tf.keras model containing a layer named `layer_name`.
        layer_name: name of the layer whose filters are visualized.

    Side effects: renders the generated images via `plot_images`.
    """
    def model_modifier(m):
        # Expose the target layer as the model output and linearize the
        # final activation so gradients are not squashed.
        new_model = tf.keras.Model(
            inputs=m.inputs,
            outputs=[m.get_layer(name=layer_name).output])
        new_model.layers[-1].activation = tf.keras.activations.linear
        return new_model

    activation_maximization = ActivationMaximization(model, model_modifier)
    num_of_filters = 16
    filter_numbers = np.random.choice(
        model.get_layer(name=layer_name).output.shape[-1], num_of_filters)
    vis_images = []
    # BUGFIX: the original looped over `enumerate(filter_numbers)`, so each
    # `filter_number` was an (index, value) tuple and the tuple -- not the
    # filter index -- was passed to SmoothedLoss.
    for filter_number in filter_numbers:
        # Define loss function that is sum of a filter output.
        loss = SmoothedLoss(filter_number)
        # Generate max activation
        activation = activation_maximization(loss)
        image = activation[0].astype(np.uint8)
        vis_images.append(image)
    plot_images(vis_images, None, 400, layer_name)
def test__call__(cnn_model):
    """A default ActivationMaximization run returns an image of input shape."""
    # NOTE(review): two more tests named `test__call__` (Saliency, Gradcam)
    # appear later in this chunk; if they share one module, earlier
    # definitions are shadowed (F811) -- verify they live in separate files.
    visualizer = ActivationMaximization(cnn_model)
    generated = visualizer(SmoothedLoss(1), steps=1)
    assert generated.shape == (1, 8, 8, 3)
def test__call__if_keepdims_is_active(dense_model):
    """keepdims=True retains the feature axis of the saliency map."""
    saliency = Saliency(dense_model)
    saliency_map = saliency(SmoothedLoss(1), np.random.sample((3, )), keepdims=True)
    assert saliency_map.shape == (1, 3)
def test__call__(cnn_model):
    """Saliency collapses the channel axis: (1, 8, 8, 3) in, (1, 8, 8) out."""
    # NOTE(review): shares its name with other `test__call__` definitions in
    # this chunk -- fine across modules, an F811 shadowing bug within one.
    saliency = Saliency(cnn_model)
    seed = np.random.sample((1, 8, 8, 3))
    saliency_map = saliency(SmoothedLoss(1), seed)
    assert saliency_map.shape == (1, 8, 8)
def test__call__if_seed_input_has_not_batch_dim(cnn_model):
    """A seed input without a batch axis still yields a batched saliency map."""
    # NOTE(review): a Gradcam test later in this chunk reuses this name;
    # confirm they live in separate modules (F811 otherwise).
    saliency = Saliency(cnn_model)
    saliency_map = saliency(SmoothedLoss(1), np.random.sample((8, 8, 3)))
    assert saliency_map.shape == (1, 8, 8)
def test__call__with_gradient_modifier(cnn_model):
    """An identity gradient_modifier leaves the output shape untouched."""
    visualizer = ActivationMaximization(cnn_model)
    generated = visualizer(SmoothedLoss(1), steps=1, gradient_modifier=lambda x: x)
    assert generated.shape == (1, 8, 8, 3)
def test__call__with_seed_input(cnn_model):
    """An explicit seed image drives optimization to the same output shape."""
    visualizer = ActivationMaximization(cnn_model)
    seed = np.random.sample((8, 8, 3))
    generated = visualizer(SmoothedLoss(1), seed_input=seed, steps=1)
    assert generated.shape == (1, 8, 8, 3)
def test__call__if_loss_is_list(cnn_model):
    """A single loss wrapped in a list behaves like a bare loss."""
    visualizer = ActivationMaximization(cnn_model)
    generated = visualizer([SmoothedLoss(1)], steps=1)
    assert generated.shape == (1, 8, 8, 3)
def test__call__if_seed_input_has_not_batch_dim(cnn_model):
    """A seed input without a batch axis still yields a batched cam."""
    # NOTE(review): duplicates the name of an earlier Saliency test in this
    # chunk; harmless across modules, an F811 shadowing bug within one.
    gradcam = Gradcam(cnn_model)
    cam = gradcam(SmoothedLoss(1), np.random.sample((8, 8, 3)))
    assert cam.shape == (1, 8, 8)
def test__call__if_penultimate_layer_is_None(cnn_model):
    """Explicitly passing penultimate_layer=None still yields a (1, 8, 8) cam."""
    gradcam = Gradcam(cnn_model)
    seed = np.random.sample((1, 8, 8, 3))
    cam = gradcam(SmoothedLoss(1), seed, penultimate_layer=None)
    assert cam.shape == (1, 8, 8)
def test__call__(cnn_model):
    """A default Gradcam call maps a (1, 8, 8, 3) input to a (1, 8, 8) cam."""
    # NOTE(review): shares its name with other `test__call__` definitions in
    # this chunk -- fine across modules, an F811 shadowing bug within one.
    gradcam = Gradcam(cnn_model)
    cam = gradcam(SmoothedLoss(1), np.random.sample((1, 8, 8, 3)))
    assert cam.shape == (1, 8, 8)