Example #1
0
def activation_maximization(model, output_layer_name, losses, seed_input):
    """Visualize an internal representation of ``model`` via activation maximization.

    Runs 300 optimization steps starting from ``seed_input`` and displays the
    resulting input pattern as a grayscale image.

    Args:
        model: The Keras model to visualize.
        output_layer_name: Unused here; kept for interface compatibility.
            NOTE(review): presumably meant to select a target layer — confirm.
        losses: Loss/score function(s) forwarded to ActivationMaximization.
        seed_input: Initial input for the optimization.
    """
    # BUG FIX: the original referenced an undefined global ``eval_model``
    # (and shadowed this function's own name); use the ``model`` parameter.
    visualizer = ActivationMaximization(model, clone=False)

    activation = visualizer(
        losses,
        seed_input=seed_input,
        steps=300,
        input_modifiers=[],
        regularizers=[],
        input_range=(0.0, 1.0),
        callbacks=[Print(interval=50)],
    )
    # Scale the first channel of the first sample from [0, 1] to 0-255 for display.
    image = (activation[0, :, :, 0] * 255.0).astype(np.uint8)

    subplot_args = {
        "nrows": 1,
        "ncols": 1,
        "figsize": (5, 5),
        "subplot_kw": {
            "xticks": [],
            "yticks": []
        },
    }

    f, ax = plt.subplots(**subplot_args)
    ax.imshow(image, cmap="gray")
    plt.tight_layout()
    plt.show()
Example #2
0
def test__call__with_mutiple_inputs_model(multiple_inputs_model):
    """Both inputs of a two-input model yield (1, 8, 8, 3) activations."""
    instance = ActivationMaximization(multiple_inputs_model)
    outputs = instance(SmoothedLoss(1), steps=1, input_modifiers=None)
    expected = (1, 8, 8, 3)
    assert outputs[0].shape == expected
    assert outputs[1].shape == expected
Example #3
0
def test__call__if_loss_is_None(cnn_model):
    """Calling with ``loss=None`` must raise ValueError.

    Rewritten from the try/except + ``assert True/False`` anti-pattern to
    ``pytest.raises``, matching the other tests in this file.
    """
    activation_maximization = ActivationMaximization(cnn_model)
    with pytest.raises(ValueError):
        activation_maximization(None, steps=1)
def test__call__with_mutiple_inputs_model(multiple_inputs_model):
    """Each input of the two-input model gets an activation of its own shape."""
    instance = ActivationMaximization(multiple_inputs_model)
    outputs = instance(CategoricalScore(1, 2), steps=1, input_modifiers=None)
    assert outputs[0].shape == (1, 8, 8, 3)
    assert outputs[1].shape == (1, 10, 10, 3)
Example #5
0
def test__call__with_mutiple_outputs_model(multiple_outputs_model):
    """A multi-output model accepts a single loss, a loss list, and regularizers."""
    expected_shape = (1, 8, 8, 3)

    # Single loss object.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance(SmoothedLoss(1), steps=1,
                    input_modifiers=None).shape == expected_shape

    # One loss per output.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance([SmoothedLoss(1), SmoothedLoss(1)],
                    steps=1,
                    input_modifiers=None).shape == expected_shape

    # Loss list combined with explicit regularizers.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance([SmoothedLoss(1), SmoothedLoss(1)],
                    steps=1,
                    input_modifiers=None,
                    regularizers=[TotalVariation(10.),
                                  L2Norm(10.)]).shape == expected_shape
Example #6
0
def test__call__with_callback(cnn_model):
    """Every MockCallback hook should fire during a one-step run."""
    callback = MockCallback()
    instance = ActivationMaximization(cnn_model)
    result = instance(SmoothedLoss(1), steps=1, callbacks=callback)
    assert result.shape == (1, 8, 8, 3)
    assert callback.on_begin_was_called
    assert callback.on_call_was_called
    assert callback.on_end_was_called
Example #7
0
 def test__call__(self, model, layer, expected_error):
     """ExtractIntermediateLayer should retarget the model's output to ``layer``.

     NOTE(review): parametrized test — ``expected_error`` is presumably a
     no-raise marker or an exception type; confirm against the fixture.
     """
     # Precondition: the fixture model ends in a (None, 2) output.
     assert model.outputs[0].shape.as_list() == [None, 2]
     with assert_raises(expected_error):
         instance = ActivationMaximization(model,
                                           model_modifier=ExtractIntermediateLayer(layer))
         # The modifier must produce a distinct model whose output is the
         # intermediate layer's (None, 6, 6, 6) activation.
         assert instance.model != model
         assert instance.model.outputs[0].shape.as_list() == [None, 6, 6, 6]
         instance([CategoricalScore(0)])
Example #8
0
def test__call__with_mutiple_outputs_model_but_losses_is_too_many(multiple_outputs_model):
    """Passing more losses than model outputs must raise ValueError.

    Converted from try/except + ``assert True/False`` to ``pytest.raises``,
    matching the equivalent test later in this file.
    """
    activation_maximization = ActivationMaximization(multiple_outputs_model)
    with pytest.raises(ValueError):
        activation_maximization(
            [SmoothedLoss(1), SmoothedLoss(1), SmoothedLoss(1)], steps=1, input_modifiers=None)
def test__call__with_callback(model):
    """All three MockCallback hooks fire over one optimization step."""
    callback = MockCallback()
    instance = ActivationMaximization(model)
    result = instance(CategoricalScore(1, 2), steps=1, callbacks=callback)
    assert result.shape == (1, 8, 8, 3)
    assert callback.on_begin_was_called
    assert callback.on_call_was_called
    assert callback.on_end_was_called
def test__call__with_mutiple_outputs_model_but_losses_is_too_many(
        multiple_outputs_model):
    """Three scores against a model with fewer outputs must raise ValueError."""
    instance = ActivationMaximization(multiple_outputs_model)
    too_many_scores = [
        CategoricalScore(1, 2),
        CategoricalScore(1, 2),
        CategoricalScore(1, 2),
    ]
    with pytest.raises(ValueError):
        instance(too_many_scores, steps=1, input_modifiers=None)
Example #11
0
    def _make_actmax(self, img_array, model, layer_name, filter_idx, add_text=False):
        """Generate an activation-maximization image for one filter of a layer.

        Args:
            img_array: Unused here; kept for interface compatibility.
            model: Model to visualize (a clone is modified, ``clone=True``).
            layer_name: Layer name fed to the model-modifier generator.
            filter_idx: Filter index fed to the loss generator.
            add_text: Unused here; kept for interface compatibility.

        Returns:
            A PIL image of the maximized activation.
        """
        # Removed an unused ``model.get_layer(...)`` call whose result was
        # misleadingly named ``layer_idx`` (get_layer returns a Layer object).
        model_modifier = generate_model_modifier(layer_name)
        loss = generate_loss(filter_idx)
        activation_maximization = ActivationMaximization(model, model_modifier, clone=True)

        # Generate max activation
        activation = activation_maximization(loss, callbacks=[Print(interval=50)])
        image = activation[0].astype(np.uint8)
        image = keras.preprocessing.image.array_to_img(image)
        return image
def test__call__with_mutiple_outputs_model(multiple_outputs_model):
    """Single score, score list, and score list + regularizers all succeed."""
    expected = (1, 8, 8, 3)

    # Single score object.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance(CategoricalScore(1, 2), steps=1,
                    input_modifiers=None).shape == expected

    # One score per output.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance([CategoricalScore(1, 2), CategoricalScore(1, 2)],
                    steps=1,
                    input_modifiers=None).shape == expected

    # Score list with explicit regularizers.
    instance = ActivationMaximization(multiple_outputs_model)
    assert instance([CategoricalScore(1, 2), CategoricalScore(1, 2)],
                    steps=1,
                    input_modifiers=None,
                    regularizers=[TotalVariation(10.),
                                  L2Norm(10.)]).shape == expected
Example #13
0
 def test__call__(self, model):
     """ReplaceToLinear should swap every output layer's activation to linear."""
     # Precondition: the fixture model's output activations are non-linear.
     assert model.get_layer(name='output_1').activation != tf.keras.activations.linear
     if len(model.outputs) > 1:
         assert model.get_layer(name='output_2').activation != tf.keras.activations.linear
     instance = ActivationMaximization(model, model_modifier=ReplaceToLinear())
     # The modifier yields a distinct model object with linear activations.
     assert instance.model != model
     assert instance.model.get_layer(name='output_1').activation == tf.keras.activations.linear
     if len(model.outputs) > 1:
         assert instance.model.get_layer(
             name='output_2').activation == tf.keras.activations.linear
         # Multi-output model: one score per output.
         instance([CategoricalScore(0), CategoricalScore(0)])
     else:
         instance([CategoricalScore(0)])
Example #14
0
def visualize_dense_layer(model, layer_name, index_label_map, itr):
    """Visualize dense-layer units via activation maximization, one per label.

    For each ``(index, label)`` pair, maximizes the mean activation of unit
    ``index - 1``, writes ``<label>.png``, and finally plots all images.

    Args:
        model: Keras model to visualize.
        layer_name: Title passed to ``plot_images`` (not used to select a layer).
        index_label_map: Mapping of 1-based unit index -> label string.
        itr: Number of optimization steps per unit.
    """
    def model_modifier(m):
        # Make the final activation linear so gradients are not squashed.
        m.layers[-1].activation = tf.keras.activations.linear

    # Hoisted out of the loop: construction does not depend on the unit index.
    activation_maximization = ActivationMaximization(model, model_modifier)

    imgs = []
    labels = []
    for index, label in index_label_map.items():
        # Bind the unit index as a default arg to avoid late-binding surprises.
        def loss(x, unit=index - 1):
            return K.mean(x[:, unit])

        activation = activation_maximization(loss,
                                             steps=itr,
                                             callbacks=[Print(interval=100)])
        img = activation[0].astype(np.uint8)
        img = np.squeeze(img, 2)
        cv2.imwrite(label + ".png", img)
        imgs.append(img)
        labels.append(label)
    plot_images(imgs, labels, 400, layer_name)
Example #15
0
def visualize_conv_layer_filters(model, layer_name):
    """Visualize 16 randomly chosen filters of a convolutional layer.

    Args:
        model: Keras model containing a layer named ``layer_name``.
        layer_name: Name of the convolutional layer to visualize.
    """
    def model_modifier(m):
        # Retarget the model so its output is the chosen layer, and make
        # that output linear so gradients are not squashed.
        new_model = tf.keras.Model(
            inputs=m.inputs, outputs=[m.get_layer(name=layer_name).output])
        new_model.layers[-1].activation = tf.keras.activations.linear
        return new_model

    activation_maximization = ActivationMaximization(model, model_modifier)
    num_of_filters = 16
    filter_numbers = np.random.choice(
        model.get_layer(name=layer_name).output.shape[-1], num_of_filters)
    vis_images = []
    # BUG FIX: the original iterated ``enumerate(filter_numbers)``, so each
    # ``filter_number`` was an (index, value) tuple instead of a filter index.
    for filter_number in filter_numbers:
        # Define loss function that is sum of a filter output.
        loss = SmoothedLoss(filter_number)

        # Generate max activation
        activation = activation_maximization(loss)
        image = activation[0].astype(np.uint8)
        vis_images.append(image)
    plot_images(vis_images, None, 400, layer_name)
Example #16
0
# Load a pre-trained 4-class CNN from disk and print its architecture.
h5_fn = ('MODELS/CNN_4_classes_old.h5')
model = keras.models.load_model(h5_fn)
print(model.summary())

########################################################################################################################
from tf_keras_vis.activation_maximization import ActivationMaximization
import tensorflow as tf


def model_modifier(m):
    # Replace the last layer's activation (presumably softmax) with linear
    # so activation maximization gets useful gradients.
    m.layers[-1].activation = tf.keras.activations.linear


# clone=False: the modifier mutates ``model`` in place rather than a copy.
activation_maximization = ActivationMaximization(model,
                                                 model_modifier,
                                                 clone=False)

from tf_keras_vis.utils.callbacks import Print

# NOTE(review): ``numClasses`` is not defined in this snippet — it must come
# from elsewhere (likely 4, given the model name); confirm.
for filter_number in range(numClasses):

    def loss(output):
        # Score for the current class; redefined (and used) each iteration,
        # so the late-binding of ``filter_number`` is harmless here.
        return output[..., filter_number]

    # Run with default steps, logging progress every 50 iterations.
    activation = activation_maximization(loss, callbacks=[Print(interval=50)])
    image = np.squeeze(activation[0].astype(np.uint8))

    subplot_args = {'nrows': 1, 'ncols': 1, 'figsize': (5, 5)}
    f, ax = plt.subplots(**subplot_args)
    im = ax.imshow(image)
Example #17
0
def test__call__with_gradient_modifier(cnn_model):
    """An identity gradient_modifier should not break the run."""
    instance = ActivationMaximization(cnn_model)
    result = instance(SmoothedLoss(1), steps=1, gradient_modifier=lambda g: g)
    assert result.shape == (1, 8, 8, 3)
Example #18
0
def test__call__with_seed_input(cnn_model):
    """Supplying an explicit random (8, 8, 3) seed input works."""
    seed = np.random.sample((8, 8, 3))
    instance = ActivationMaximization(cnn_model)
    result = instance(SmoothedLoss(1), seed_input=seed, steps=1)
    assert result.shape == (1, 8, 8, 3)
Example #19
0
def test__call__if_loss_is_list(cnn_model):
    """A one-element loss list behaves like a bare loss."""
    instance = ActivationMaximization(cnn_model)
    assert instance([SmoothedLoss(1)], steps=1).shape == (1, 8, 8, 3)
Example #20
0
def test__call__(cnn_model):
    """Smoke test: one optimization step yields a (1, 8, 8, 3) result."""
    instance = ActivationMaximization(cnn_model)
    assert instance(SmoothedLoss(1), steps=1).shape == (1, 8, 8, 3)
def test__call__with_gradient_modifier(model):
    """An identity gradient_modifier leaves the output shape unchanged."""
    instance = ActivationMaximization(model)
    result = instance(CategoricalScore(1, 2),
                      steps=1,
                      gradient_modifier=lambda g: g)
    assert result.shape == (1, 8, 8, 3)
from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.utils.callbacks import GifGenerator
from tf_keras_vis.utils.callbacks import Print
from tensorflow.keras.models import load_model

# Checkpoint of the droplet-detection model to visualize.
MODEL_PATH = r"C:\NN\Experiments\DropletDetection\2020-01-29 07-55-29 - MediumNet_run2\checkpoint\best_epoch-00023_val-loss-0.01.hdf5"
class_to_maximize = 0

model = load_model(MODEL_PATH)


# Define modifier to replace a softmax function of the last layer to a linear function.
def model_modifier(m):
    m.layers[-1].activation = tf.keras.activations.linear


# Create Activation Maximization object
activation_maximization = ActivationMaximization(model, model_modifier)

# Score: mean activation of the target class' output unit.
# NOTE(review): ``tf``, ``K`` (keras backend), ``np`` and ``plt`` are not
# imported in this snippet — they must come from elsewhere in the file.
loss = lambda x: K.mean(x[:, class_to_maximize])

# Run 512 steps, logging every 100 and recording a GIF of the progress.
activation = activation_maximization(
    loss,
    steps=512,
    callbacks=[Print(interval=100),
               GifGenerator('activation_maximization')])
image = activation[0].astype(np.uint8)

plt.figure()
plt.imshow(image)
def test__call__if_loss_is_None(model):
    """A ``None`` score must be rejected with ValueError."""
    instance = ActivationMaximization(model)
    with pytest.raises(ValueError):
        instance(None, steps=1)
def test__call__(model):
    """Smoke test: a single step produces a (1, 8, 8, 3) activation."""
    instance = ActivationMaximization(model)
    assert instance(CategoricalScore(1, 2), steps=1).shape == (1, 8, 8, 3)
def test__call__with_seed_input(model):
    """An explicit random seed input of shape (8, 8, 3) is accepted."""
    seed = np.random.sample((8, 8, 3))
    instance = ActivationMaximization(model)
    result = instance(CategoricalScore(1, 2), seed_input=seed, steps=1)
    assert result.shape == (1, 8, 8, 3)
def test__call__if_loss_is_list(model):
    """A single-element score list works like a bare score."""
    instance = ActivationMaximization(model)
    assert instance([CategoricalScore(1, 2)], steps=1).shape == (1, 8, 8, 3)