def test_integration(decorrelate, fft):
    """End-to-end render test: two neuron objectives on InceptionV1.

    Parametrized over the image parameterization flags (decorrelate, fft).
    Each render must actually change the image between its first and last
    threshold snapshots.
    """
    inceptionv1 = InceptionV1()

    # Small, fast render: 16px image, early thresholds, no transforms.
    obj = objectives.neuron("mixed3a_pre_relu", 0)
    param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        obj,
        param_f=param_f,
        thresholds=(1, 2),
        verbose=False,
        transforms=[],
    )
    start_image = rendering[0]
    end_image = rendering[-1]
    # BUG FIX: the original computed these and immediately rebound them
    # below, discarding the first render entirely (dead code). Assert on
    # them so the first render actually contributes to the test.
    assert (start_image != end_image).any()

    # Larger 64px render with a fixed seed for reproducibility.
    objective_f = objectives.neuron("mixed3a", 177)
    param_f = lambda: param.image(64, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        objective_f,
        param_f,
        verbose=False,
        thresholds=(0, 64),
        use_fixed_seed=True,
    )
    start_image, end_image = rendering
    assert (start_image != end_image).any()
def test_InceptionV1_graph_import():
    """Importing the InceptionV1 graph must expose every important pre-relu node."""
    model = InceptionV1()
    model.import_graph()
    graph_def = tf.get_default_graph().as_graph_def()
    present = {n.name for n in graph_def.node}
    for layer_name in important_layer_names:
        expected = "import/" + layer_name + "_pre_relu"
        assert expected in present
def test_aligned_activation_atlas():
    """Render an aligned activation atlas between AlexNet and InceptionV1
    and save each resulting atlas image to disk.

    NOTE(review): `subset` and `save` are assumed to be defined elsewhere
    in this file/module — confirm against the full source.
    """
    model1 = AlexNet()
    layer1 = model1.layers[1]
    model2 = InceptionV1()
    layer2 = model2.layers[8]  # mixed4d
    atlasses = aligned_activation_atlas(
        model1, layer1, model2, layer2, number_activations=subset
    )
    # BUG FIX: the original formatted `path` *before* this loop, where
    # `index` is undefined (NameError), and would have reused one stale
    # path for every atlas. Build the path per-iteration instead.
    for index, atlas in enumerate(atlasses):
        path = "tests/recipes/results/activation_atlas/aligned_atlas-{}-of-{}.jpg".format(
            index, len(atlasses)
        )
        save(atlas, path)
def test_integration_any_channels():
    """Every objective/parameterization pairing should produce a changing render."""
    inceptionv1 = InceptionV1()
    objectives_f = [
        objectives.deepdream("mixed4a_pre_relu"),
        objectives.channel("mixed4a_pre_relu", 360),
        objectives.neuron("mixed3a", 177),
    ]
    params_f = [
        lambda: param.grayscale_image_rgb(128),
        lambda: arbitrary_channels_to_rgb(128, channels=10),
    ]
    # Exercise the full cross-product of objectives and parameterizations.
    for objective_f in objectives_f:
        for param_f in params_f:
            first, last = render.render_vis(
                inceptionv1,
                objective_f,
                param_f,
                verbose=False,
                thresholds=(0, 64),
                use_fixed_seed=True,
            )
            assert (first != last).any()
def test_InceptionV1_labels():
    """The model ships with labels, and the first entry is the dummy class."""
    labels = InceptionV1().labels
    assert labels is not None
    assert labels[0] == "dummy"
def test_InceptionV1_model_download():
    """Loading the frozen graphdef must populate model.graph_def."""
    model = InceptionV1()
    model.load_graphdef()
    graph_def = model.graph_def
    assert graph_def is not None
def inceptionv1():
    """Fixture: a fresh InceptionV1 model (graphdef not yet loaded)."""
    model = InceptionV1()
    return model
def inceptionv1():
    """Fixture: an InceptionV1 model with its frozen graphdef already loaded."""
    loaded = InceptionV1()
    loaded.load_graphdef()
    return loaded
from __future__ import absolute_import, division, print_function

import pytest
import tensorflow as tf

from lucid.modelzoo.vision_models import InceptionV1
from lucid.optvis import objectives, param, render, transform

# Shared module-level model: load the graphdef once for all parametrizations.
model = InceptionV1()
model.load_graphdef()


@pytest.mark.parametrize("decorrelate", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_integration(decorrelate, fft):
    """A short render run must change the image between its snapshots."""
    obj = objectives.neuron("mixed3a_pre_relu", 0)

    def param_f():
        return param.image(16, decorrelate=decorrelate, fft=fft)

    images = render.render_vis(
        model,
        obj,
        param_f=param_f,
        thresholds=(1, 2),
        verbose=False,
        transforms=[],
    )
    first, last = images[0], images[-1]
    assert (first != last).any()
def test_InceptionV1_aligned_activations():
    """Aligned activations for the first layer have the expected shape."""
    first_layer = InceptionV1().layers[0]
    activations = get_aligned_activations(first_layer)
    assert activations.shape == (100000, 64)