Example No. 1
def test_integration(decorrelate, fft, inceptionv1):
    obj = objectives.neuron("mixed3a_pre_relu", 0)
    param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        obj,
        param_f=param_f,
        thresholds=(1, 2),
        verbose=False,
        transforms=[],
    )
    start_image = rendering[0]
    end_image = rendering[-1]
    objective_f = objectives.neuron("mixed3a", 177)
    param_f = lambda: param.image(64, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(
        inceptionv1,
        objective_f,
        param_f,
        verbose=False,
        thresholds=(0, 64),
        use_fixed_seed=True,
    )
    start_image, end_image = rendering

    assert (start_image != end_image).any()
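A note on the return value this test relies on: render_vis returns one numpy batch of shape (batch, H, W, 3) per entry in thresholds, which is what makes unpacking into start and end images work. A minimal sketch reusing the same fixtures (the thresholds here are only illustrative):

rendering = render.render_vis(
    inceptionv1,
    objectives.neuron("mixed3a_pre_relu", 0),
    param_f=lambda: param.image(16),
    thresholds=(0, 32),
    verbose=False,
)
assert len(rendering) == 2          # one image batch per threshold
assert rendering[0].shape[-1] == 3  # each entry is a (batch, H, W, 3) array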
Example No. 2
    def render_activation_grid_very_naive(self,
                                          img,
                                          layer="mixed4d",
                                          W=42,
                                          n_steps=256):

        # Get the activations
        with tf.Graph().as_default(), tf.Session() as sess:
            t_input = tf.placeholder("float32", [None, None, None, 3])
            T = render.import_model(self.model, t_input, t_input)
            acts = T(layer).eval({t_input: img[None]})[0]
        acts_flat = acts.reshape([-1] + [acts.shape[2]])

        # Render an image for each activation vector
        def param_f():
            return param.image(W, batch=acts_flat.shape[0])

        obj = objectives.Objective.sum([
            objectives.direction(layer, v, batch=n)
            for n, v in enumerate(acts_flat)
        ])
        thresholds = (n_steps // 2, n_steps)
        vis_imgs = render.render_vis(self.model,
                                     obj,
                                     param_f,
                                     thresholds=thresholds)[-1]

        # Combine the images and display the resulting grid
        vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])
        vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]
        show(np.hstack(np.hstack(vis_imgs_cropped)))
        return vis_imgs_cropped
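The last two lines tile the per-cell renderings into one big image. A self-contained sketch of the double np.hstack used there (toy sizes, purely illustrative):

import numpy as np

tiles = np.zeros((3, 4, 38, 38, 3), dtype=np.float32)  # (rows, cols, H, W, 3)
grid = np.hstack(np.hstack(tiles))                      # -> (rows*H, cols*W, 3)
print(grid.shape)                                       # (114, 152, 3)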
Example No. 3
    def vis_traditional(
        self,
        feature_list=None,
        *,
        transforms=[transform.jitter(2)],
        l2_coeff=0.0,
        l2_layer_name=None,
    ):
        if feature_list is None:
            feature_list = list(range(self.acts_reduced.shape[-1]))
        try:
            feature_list = list(feature_list)
        except TypeError:
            feature_list = [feature_list]

        obj = sum([
            objectives.direction_neuron(self.layer_name,
                                        self.channel_dirs[feature],
                                        batch=feature)
            for feature in feature_list
        ])
        if l2_coeff != 0.0:
            assert (
                l2_layer_name is not None
            ), "l2_layer_name must be specified if l2_coeff is non-zero"
            obj -= objectives.L2(l2_layer_name) * l2_coeff
        param_f = lambda: param.image(64, batch=len(feature_list))
        return render.render_vis(self.model,
                                 obj,
                                 param_f=param_f,
                                 transforms=transforms)[-1]
Example No. 4
    def render_atlas_tile(model, op_name, directions, icon_size=45, n_steps=127,
                          transforms_amount=1, cossim_pow=0, L2_amount=2):
        transforms_options = [
            [
                transform.jitter(2)
            ],
            [
                transform.pad(12, mode="constant", constant_value=.5),
                transform.jitter(8),
                transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
                transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
                transform.jitter(4),
            ],
            [
                transform.pad(2, mode='constant', constant_value=.5),
                transform.jitter(4),
                transform.jitter(4),
                transform.jitter(8),
                transform.jitter(8),
                transform.jitter(8),
                transform.random_scale([0.995**n for n in range(-5,80)] + [0.998**n for n in 2*list(range(20,40))]),
                transform.random_rotate(list(range(-20,20))+list(range(-10,10))+list(range(-5,5))+5*[0]),
                transform.jitter(2),
            ],
        ]
        
        param_f = lambda: param.image(icon_size, batch=directions.shape[0])
        obj = objectives.Objective.sum(
          [objectives.direction_neuron(op_name, v, batch=n, cossim_pow=cossim_pow)
           for n,v in enumerate(directions)
          ]) - L2_amount * objectives.L2("input", 0.5) * objectives.L2("input", 0.5)
        thresholds=(n_steps//2, n_steps)

        vis_imgs = render.render_vis(model, obj, param_f, transforms=transforms_options[transforms_amount], thresholds=thresholds, verbose=False)[-1]

        return vis_imgs
Example No. 5
def neuron_groups(img, filename, layer, n_groups=10, attr_classes=None, filenumber=0):
    # Compute activations
    dirname = '../images/' + filename+'/'
    if attr_classes is None:
        attr_classes = []
    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()

    # We'll use ChannelReducer (a wrapper around scikit-learn's factorization tools)
    # to apply Non-Negative Matrix factorization (NMF).

    nmf = ChannelReducer(n_groups, "NMF")
    spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1).astype("float32")
    channel_factors = nmf._reducer.components_.astype("float32")

    # Let's organize the channels based on their horizontal position in the image

    x_peak = np.argmax(spatial_factors.max(1), 1)
    ns_sorted = np.argsort(x_peak)
    spatial_factors = spatial_factors[ns_sorted]
    channel_factors = channel_factors[ns_sorted]

    # And create a feature visualization of each group

    param_f = lambda: param.image(80, batch=n_groups)
    obj = sum(objectives.direction(layer, channel_factors[i], batch=i)
              for i in range(n_groups))
    group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

    # We'd also like to know about attribution

    # First, let's turn each group into a vector over activations
    group_vecs = [spatial_factors[i, ..., None] * channel_factors[i]
                  for i in range(n_groups)]

    attrs = np.asarray([raw_class_group_attr(img, layer, attr_class, group_vecs)
                        for attr_class in attr_classes])

    print(
        attrs
    )
    try:
        os.mkdir(dirname)

    except Exception as e:
        print(e)
    # Let's render the visualization!
    finally:
        with open(dirname + '/attrs.txt', 'w') as f_w:
            f_w.write(str(attrs))
        for index, icon in enumerate(group_icons):
            imgdata=to_image_url(icon)
            print(imgdata)
            imgdata = base64.b64decode(str(imgdata))
            print(imgdata)
            with open(dirname + str(index) + '.png', 'wb') as f_jpg:
                f_jpg.write(imgdata)
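For reference, the NMF step above comes down to a few array shapes. A standalone sketch with synthetic activations (channel count and spatial size are illustrative; ChannelReducer is lucid.misc.channel_reducer.ChannelReducer):

import numpy as np
from lucid.misc.channel_reducer import ChannelReducer

acts = np.random.rand(1, 14, 14, 528).astype("float32")           # (1, H, W, C)
nmf = ChannelReducer(6, "NMF")
spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1)    # (n_groups, H, W)
channel_factors = nmf._reducer.components_                         # (n_groups, C)
print(spatial_factors.shape, channel_factors.shape)                # (6, 14, 14) (6, 528)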
Example No. 6
 def __call__(self, layer_name, channel_index):
     obj = objectives.channel(layer_name, channel_index)
     image = render.render_vis(self.model,
                               obj,
                               param_f=self.param_f,
                               thresholds=[self.threshold],
                               use_fixed_seed=True,
                               verbose=False)
     return np.array(image[0][0])
Example No. 7
    def start(self):
        self.image = None
        self._doRun(True)

        obj = objectives.channel(self.layer_id, self.unit)
        self.image = render.render_vis(self.model, obj)
        #self.image = render.render_vis(self.model, self.unit_id)

        self._doRun(False)
Example No. 8
def neuron_groups(model, img, layer, n_groups=6, attr_classes=[]):
    # Compute activations

    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()

    # We'll use ChannelReducer (a wrapper around scikit-learn's factorization tools)
    # to factorize the activations (PCA here, despite the `nmf` variable name).

    nmf = ChannelReducer(n_groups, "PCA")
    print(layer, n_groups)
    spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1).astype("float32")
    channel_factors = nmf._reducer.components_.astype("float32")

    # Let's organize the channels based on their horizontal position in the image

    x_peak = np.argmax(spatial_factors.max(1), 1)
    ns_sorted = np.argsort(x_peak)
    spatial_factors = spatial_factors[ns_sorted]
    channel_factors = channel_factors[ns_sorted]

    # And create a feature visualization of each group

    param_f = lambda: param.image(80, batch=n_groups)
    obj = sum(objectives.direction(layer, channel_factors[i], batch=i)
              for i in range(n_groups))
    group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

    # We'd also like to know about attribution
    #
    # First, let's turn each group into a vector over activations
    group_vecs = [spatial_factors[i, ..., None] * channel_factors[i]
                  for i in range(n_groups)]

    attrs = np.asarray([raw_class_group_attr(img, layer, attr_class, model, group_vecs)
                        for attr_class in attr_classes])

    gray_scale_groups = [skimage.color.rgb2gray(icon) for icon in group_icons]

    # Let's render the visualization!
    data = {
        "img": _image_url(img),
        "n_groups": n_groups,
        "spatial_factors": [_image_url(factor[..., None] / np.percentile(spatial_factors, 99) * [1, 0, 0]) for factor in
                            spatial_factors],
        "group_icons": [_image_url(icon) for icon in gray_scale_groups]
    }

    # with open('ng.pickle', 'wb') as handle:
    #     pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # with open('./svelte_python/ng.pickle', 'rb') as p_file:
    #     data = pickle.load(p_file)

    generate_html('neuron_groups', data)
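The red heat maps in data["spatial_factors"] come from broadcasting each (H, W) factor against the RGB mask [1, 0, 0]. A tiny numpy sketch of that step (synthetic factor, normalization as above):

import numpy as np

factor = np.random.rand(14, 14).astype("float32")                 # one spatial factor
heat = factor[..., None] / np.percentile(factor, 99) * [1, 0, 0]  # -> (H, W, 3), red channel only
print(heat.shape)                                                  # (14, 14, 3)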
Example No. 9
def test_integration(decorrelate, fft):
    obj = objectives.neuron("mixed3a_pre_relu", 0)
    param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
    rendering = render.render_vis(model,
                                  obj,
                                  param_f=param_f,
                                  thresholds=(1, 2),
                                  verbose=False,
                                  transforms=[])
    start_image = rendering[0]
    end_image = rendering[-1]
    assert (start_image != end_image).any()
Example No. 10
def render_feature(cppn_f=lambda: image_cppn(84),
                   optimizer=tf.train.AdamOptimizer(0.001),
                   objective=objectives.channel('noname', 0),
                   transforms=[]):
    vis = render.render_vis(m,
                            objective,
                            param_f=cppn_f,
                            optimizer=optimizer,
                            transforms=transforms,
                            thresholds=[2**i for i in range(5, 10)],
                            verbose=False)
    #show(vis)
    return vis
Example No. 11
    def start_multi(self):
        self.image = None
        self._doRun(True)

        logger.info("!!! running all:")
        for unit in range(self.layer_units):
            self.unit = unit
            self.notify_observers(EngineChange(unit_changed=True))
            logger.info(f"!!! running unit {unit}")
            obj = objectives.channel(self.layer_id, unit)
            self.image = render.render_vis(self.model, obj)
            if not self.running:
                break
            self._doRun(True)

        self._doRun(False)
Example No. 12
def visualization(learning_rate, neuron, channel, contrast, NRO_IMG, SAVE_P):
    LEARNING_RATE = learning_rate

    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    obj  = objectives.neuron(neuron, channel)
    imgs = render.render_vis(model,  obj,
                             optimizer=optimizer,
                             transforms=[],
                             param_f=lambda: param.image(256, fft=True, decorrelate=True, init_val=NRO_IMG),  # 256 is the image size
                             thresholds=(0,2), verbose=False)


    # Note that we're doubling the image scale to make artifacts more obvious
    plt.figure()
    plt.imshow(imgs[0][0])
    plt.axis('off')
    contraste = contrast  # Adjust this number until the result looks reasonable
    plt.imshow(contraste*(imgs[1][0]-imgs[0][0]) + 0.5)
    plt.savefig(SAVE_P, bbox_inches='tight')
Example No. 13
def test_integration_any_channels():
    inceptionv1 = InceptionV1()
    objectives_f = [
        objectives.deepdream("mixed4a_pre_relu"),
        objectives.channel("mixed4a_pre_relu", 360),
        objectives.neuron("mixed3a", 177)
    ]
    params_f = [
        lambda: param.grayscale_image_rgb(128),
        lambda: arbitrary_channels_to_rgb(128, channels=10)
    ]
    for objective_f in objectives_f:
        for param_f in params_f:
            rendering = render.render_vis(
                inceptionv1,
                objective_f,
                param_f,
                verbose=False,
                thresholds=(0, 64),
                use_fixed_seed=True,
            )
            start_image, end_image = rendering

            assert (start_image != end_image).any()
Example No. 14
def convert(model, inputs):
    print(inputs['z'])
    print(inputs['layer'])
    num_neurons = {'mixed3a':255, 'mixed4a':507, 'mixed5a':831}
    start = time.time()

    #set up parameters
    layer = inputs['layer'].lower()
    neuron = int(np.clip(np.abs(float(inputs['z'])) * 1000, 0, num_neurons[layer]))
    steps = int(inputs['steps'])
    

    #start rendering the images
    param_f = lambda: param.image(512, decorrelate=True)
    output = render.render_vis(model, layer+":"+str(neuron),
                     param_f, thresholds=(steps,), verbose = False)

    #logging
    elapsed = time.time() - start
    print(f'layer: {layer} neuron {neuron}: steps: {steps} time: {elapsed}')

    #output results
    image = output[0].squeeze() * 255
    return {'image': image.astype('uint8')}
Example No. 15
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import numpy as np
import tensorflow as tf
assert tf.__version__.startswith('1')

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

# Let's import a model from the Lucid modelzoo!

model = models.InceptionV1()
model.load_graphdef()

# Visualizing a neuron is easy!

_ = render.render_vis(model, "mixed4a_pre_relu:476")
print(1)
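render_vis also returns the optimized images as numpy arrays, so a run like the one above can be written to disk. A minimal sketch (the output path is illustrative):

from lucid.misc.io import save

images = render.render_vis(model, "mixed4a_pre_relu:476", verbose=False)
save(images[-1][0], "mixed4a_476.png")  # last threshold, first image in the batch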
Example No. 16
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

if LAYER == "-":
    images = []
    layers = []
    for l in sys.stdin:
        layers.append(l.strip())

    for layer in layers:
        for i in range(COLUMNS):
            obj = objectives.channel(layer, i)
            renders = render.render_vis(network, obj)
            assert len(renders) == 1
            image = renders[0]
            assert len(image) == 1
            image = image[0]
            images.append(image)
    images = np.array(images)
    height, width = 128, 128
    rows = len(layers)
    print(images.shape)
    assert images.shape == (rows * COLUMNS, 128, 128, 3)
    grid = (images.reshape(rows, COLUMNS, height, width,
                           3).swapaxes(1, 2).reshape(height * rows,
                                                     width * COLUMNS, 3))
    scipy.misc.imsave(OUTPUT_PREFIX + ".png", grid)
    sys.exit()
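The reshape/swapaxes chain above is the usual trick for tiling a flat batch of renderings into a rows-by-columns grid. A toy check with small tiles:

import numpy as np

rows, cols, h, w = 2, 3, 4, 4
tiles = np.zeros((rows * cols, h, w, 3), dtype=np.float32)
grid = tiles.reshape(rows, cols, h, w, 3).swapaxes(1, 2).reshape(h * rows, w * cols, 3)
print(grid.shape)  # (8, 12, 3)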
Example No. 17
for op in graph.get_operations():
    print(op.name, op.values()[0].shape)

LEARNING_RATE = 0.05

optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

# objective = "mixed4b_pre_relu:452"
# objective = "mixed3b_pre_relu:10"
objective = "mixed5b_pre_relu:1"

thresholds = (1, 32, 128, 256)  # (1, 32, 128, 256, 2048)
imgs = render.render_vis(
    model,
    objective,
    optimizer=optimizer,
    transforms=[],
    param_f=lambda: param.image(64, fft=False, decorrelate=False),
    thresholds=thresholds,
    verbose=True)

fig([imgs])

JITTER = 1
ROTATE = 5
SCALE = 1.1

transforms = [
    transform.pad(2 * JITTER),
    transform.jitter(JITTER),
    transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
    transform.random_rotate(range(-ROTATE, ROTATE + 1))
]
Example No. 18
    download = "wget --output-document hello.jpg '"+url1+"'"
    os.system(download)
    #os.system('wget --output-document auto.jpg {}'.format(url1))
    print(i)
    with open('hello.jpg', 'r+b') as f:
      with Image.open(f) as image:
          cover = resizeimage.resize_cover(image, [200, 190])
          cover.save('images/auto'+str(i)+'.jpg', image.format)
    # image optimisation
    ImageOps.equalize( Image.open("images/auto"+str(i)+".jpg")).save("images/auto"+str(i)+".jpg")
    os.system('rm hello.jpg')
    content_image = load('images/auto'+str(i)+'.jpg')[...,:3]
    style_image = load("style/style2.png")[..., :3] # choose a style

    param_f = lambda: style_transfer_param(content_image, style_image)
    content_obj = 100 * activation_difference(content_layers, difference_to=CONTENT_INDEX)
    content_obj.description = "Content Loss"

    style_obj = activation_difference(style_layers, transform_f=gram_matrix, difference_to=STYLE_INDEX)
    style_obj.description = "Style Loss"

    objective = - content_obj - style_obj

    vis = render.render_vis(model, objective, param_f=param_f, thresholds=[512], verbose=False, print_objectives=[content_obj, style_obj])[-1]
    savepath = "new/" + data['Keywords'][i] + data['Year'][i] +"_num_"+ str(i) + ".jpg"
    print("s")
    save(vis[0], savepath)
    print("end")
  except:
    continue
Example No. 19

class FrozenNetwork(Model):
    model_path = MODEL_PATH
    image_shape = [224, 224, 3]
    image_value_range = (0, 1)
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

#for layer in network:
#    print(layer.get_shape())

pixels = 224

param_f = lambda: param.image(pixels, fft=True, decorrelate=True)
#obj_test = objectives.channel(LAYER, NEURON_INDEX).get_shape()
#print(obj_test)
obj = objectives.channel(LAYER, NEURON_INDEX)
images = render.render_vis(network, obj, param_f, thresholds=(1024, ))
assert len(images) == 1
image = images[0]
assert len(image) == 1
image = image[0]

out_filename = LAYER.replace(
    "/", "-") + "_" + str(NEURON_INDEX) + "_" + MODEL_PATH.zfill(10) + ".png"
scipy.misc.imsave(out_filename, image)
Example No. 20
from lucid.modelzoo.vision_base import Model


class FrozenNetwork(Model):
    model_path = network_protobuf_path
    image_shape = [256, 256, 3]
    image_value_range = (0, 1)
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

obj = objectives.channel(layer_name, neuron_index)
param_f = lambda: param.image(512, fft=True, decorrelate=True)
renders = render.render_vis(network, obj, param_f, thresholds=(2024, ))

last_image_file = sorted(glob.glob("projection/out/*step*.png"))[-1]
stylegan_render = imageio.imread(last_image_file)
lucid_render = renders[0][0]
lucid_render = (np.clip(lucid_render, 0, 1) * 255).astype(np.uint8)

h, w = lucid_render.shape[:2]
canvas = PIL.Image.new('RGB', (w * 2, h), 'white')
canvas.paste(Image.fromarray(lucid_render), (0, 0))
canvas.paste(
    Image.fromarray(stylegan_render).resize((w, h), PIL.Image.LANCZOS), (w, 0))
canvas.save("projection/combined_%s_%03d.png" %
            (layer_name.split("/")[0], neuron_index))
Example No. 21
def neuron_groups(imglist, filenamelist, layer, n_groups=6, attr_classes=None):
    # Compute activations
    filename = ''
    for f in filenamelist:
        filename += f
    with open('result/' + filename + '.html', 'a') as f:
        f.write('''<!DOCTYPE html>
                        <html>
                        <head >
                          <title>%s</title>
                              <script src='GroupWidget_1cb0e0d.js'></script>
                        </head>
                        <body>''' % (filename))
    for key, img in enumerate(imglist):
        if attr_classes is None:
            attr_classes = []
        with tf.Graph().as_default(), tf.Session():
            t_input = tf.placeholder_with_default(img, [None, None, 3])
            T = render.import_model(model, t_input, t_input)
            acts = T(layer).eval()

        # We'll use ChannelReducer (a wrapper around scikit-learn's factorization tools)
        # to apply Non-Negative Matrix factorization (NMF).

        nmf = ChannelReducer(n_groups, "NMF")
        spatial_factors = nmf.fit_transform(acts)[0].transpose(
            2, 0, 1).astype("float32")
        channel_factors = nmf._reducer.components_.astype("float32")

        # Let's organize the channels based on their horizontal position in the image

        x_peak = np.argmax(spatial_factors.max(1), 1)
        ns_sorted = np.argsort(x_peak)
        spatial_factors = spatial_factors[ns_sorted]
        channel_factors = channel_factors[ns_sorted]

        # And create a feature visualization of each group

        param_f = lambda: param.image(80, batch=n_groups)
        obj = sum(
            objectives.direction(layer, channel_factors[i], batch=i)
            for i in range(n_groups))
        group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

        # We'd also like to know about attribution

        # First, let's turn each group into a vector over activations
        group_vecs = [
            spatial_factors[i, ..., None] * channel_factors[i]
            for i in range(n_groups)
        ]

        attrs = np.asarray([
            raw_class_group_attr(img, layer, attr_class, group_vecs)
            for attr_class in attr_classes
        ])

        print(attrs)

        # Let's render the visualization!

        with open('result/' + filename + '.html', 'a') as f:
            f.write('''  <main%s></main%s>
                          <script>
                            var app = new GroupWidget_1cb0e0d({
                              target: document.querySelector( 'main%s' ),''' %
                    (key, key, key))
            f.write('''data: {''')
            f.write('"img":"%s",\n' % str(_image_url(img)))
            f.write('"n_groups"' + ":" + str(n_groups) + ',\n')
            f.write('"spatial_factors"' + ":" + str([
                _image_url(factor[..., None] /
                           np.percentile(spatial_factors, 99) * [1, 0, 0])
                for factor in spatial_factors
            ]) + ',\n')
            f.write('"group_icons"' + ":" +
                    str([_image_url(icon) for icon in group_icons]) + ',\n')
            f.write('''} });''')
            f.write('''</script>''')

    with open('result/' + filename + '.html', 'a') as f:
        f.write('''</body></html >''')
    print(filename)
Example No. 22
#         # Compute attribution backwards from each position in layer2
#         attrs = []
#         for i in range(acts2.shape[1]):
#             attrs_ = []
#             for j in range(acts2.shape[2]):
#                 grad = t_grad.eval({n_x: i, n_y: j, T(layer1): acts1})
#                 # linear approximation of impact
#                 attr = np.sum(acts1 * grad, -1)[0]
#                 attrs_.append(attr)
#             attrs.append(attrs_)
#     return np.asarray(attrs)
#
# def orange_blue(a,b,clip=False):
#   if clip:
#     a,b = np.maximum(a,0), np.maximum(b,0)
#   arr = np.stack([a, (a + b)/2., b], -1)
#   arr /= 1e-2 + np.abs(arr).max()/1.5
#   arr += 0.3
#   return arr
#
# img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")
#
# attrs = raw_spatial_spatial_attr(img, "mixed4d", "mixed5a", override=None)
# attrs = attrs / attrs.max()

_ = render.render_vis(model, "")
img = np.reshape(_, [128, 128, 3])

plt.imshow(img)
plt.show()
Example No. 23
model = models.VGG16_caffe()
model.load_graphdef()

model.show_graph()

"""## Visualize Neuron

See the [lucid tutorial](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb) to learn more.

We pick `conv1_1/conv1_1` from above, and choose to focus on unit 0.
"""

model = models.VGG16_caffe()
model.load_graphdef()

_ = render.render_vis(model, "conv1_1/conv1_1:0")

"""## Caricature

See the [inversion and caricature notebook](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/misc/feature_inversion_caricatures.ipynb) to learn more.
"""

from lucid.recipes.caricature import feature_inversion

img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")

model = models.VGG16_caffe()
model.load_graphdef()

result = feature_inversion(img, model, "conv1_1/conv1_1", n_steps=512, cossim_pow=0.0)
show(result)
Example No. 24
print('loading model')

model = models.InceptionV1()
model.load_graphdef()
print('calculating')
neuron = ("mixed4a_pre_relu", 476)
version = 7
size = 64  # resulting image cube has dimensions size x size x size x 3


def param_f3d(size):
    temp = imageCube(size)
    return tf.concat([
        temp,
        tf.transpose(temp, [1, 0, 2, 3]),
        tf.transpose(temp, [2, 1, 0, 3])
    ], 0)


objective = objectives.channel(*neuron)
image_cube = render.render_vis(
    model,
    objective,
    lambda: param_f3d(size),
    transforms=transform.standard_transforms,
    thresholds=(512, ))  # number of optimization steps (4096 was used in the original run)

image_cube = np.array(image_cube)[:, :size]  #image cube
np.save(f"featureCube{size}_{version}.npy", image_cube)
Example No. 25
def render_facet(model,
                 neuron_obj,
                 layers,
                 style_attrs,
                 strength=(0.1, 0.3),
                 l2_weight=10.0,
                 resolution=128,
                 alpha=False):
    def mean_alpha():
        def inner(T):
            input_t = T("input")
            return tf.sqrt(tf.reduce_mean(input_t[..., 3:]**2))

        return objectives.Objective(inner)

    standard_transforms = [
        transform.pad(2, mode='constant', constant_value=.5),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.random_scale([0.995**n for n in range(-5, 80)] +
                               [0.998**n for n in 2 * list(range(20, 40))]),
        transform.random_rotate(
            list(range(-20, 20)) + list(range(-10, 10)) + list(range(-5, 5)) +
            5 * [0]),
        transform.jitter(2),
        transform.crop_or_pad_to(resolution, resolution)
    ]

    if alpha:
        standard_transforms.append(transform.collapse_alpha_random())
        param_f = lambda: param.image(resolution, batch=9, alpha=True)
    else:
        param_f = lambda: param.image(resolution, batch=9)

    optimizer = tf.train.AdamOptimizer(0.02)
    ultimate_layer = [
        n.name for n in model.graph_def.node if "image_block_4" in n.name
    ][-1]
    obj = vector(ultimate_layer, neuron_obj)
    facetsp = [(5 / len(layers)) * attr(obj, style, [layer], strength)
               for style, layer in list(zip(style_attrs, layers))]
    for facetp in facetsp:
        obj = obj + facetp
    obj = obj + l2_weight * l2()
    if alpha:
        obj -= mean_alpha()
        obj -= 1e2 * objectives.blur_alpha_each_step()
    data = render.render_vis(model,
                             obj,
                             param_f,
                             transforms=standard_transforms,
                             optimizer=optimizer,
                             thresholds=(1024 * 4, ))
    return data
Example No. 26
    input_name = 'input'


def show_image(image):
    html = ""
    data_url = _image_url(image)
    html += '<img width=\"100\" style=\"margin: 10px\" src=\"' + data_url + '\">'
    with open("img.html", "w") as f:
        f.write(html)
    _display_html(html)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--crop_size', type=int, default=128)
    parser.add_argument('--model_file', type=str, default='lucid_model.pb')
    args = parser.parse_args()

    model = LucidModel()
    model.model_path = args.model_file
    model.image_shape = [args.crop_size, args.crop_size, 3]

    print("Nodes in graph:")
    for node in model.graph_def.node:
        print(node.name)
    print("=" * 30)

    obj = objectives.channel("prediction/Conv2D", 0) - objectives.channel("prediction/Conv2D", 0)
    res = render.render_vis(model, obj, transforms=[])
    show_image(res)
Example No. 27
def neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes,
                  factorization_methods,
                  flag1,
                  flag_read_attr=False,
                  iter_num=100,
                  SG_path=False,
                  labels=None,
                  pos_flag=1,
                  thres_explained_var=0.7,
                  vis_random_seed=0,
                  image_size=0,
                  debug_flag=0):
    img = load(img_name)
    # img = load("./data/doghead224.jpeg")
    # img = load("./data/cathead224.jpeg")
    # img = resize(img, (224, 224, 3), order=1, mode='constant', anti_aliasing=False).astype(np.float32)

    for attr_class in attr_classes:
        root_directory = create_root_dir(img_name, attr_class, flag1)

        if flag1 == "Shap":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_shap(img, model, attr_class, layers,
                                flag1, flag_read_attr=flag_read_attr,
                                iter_num=iter_num, labels=labels, save_directory=root_directory)
        elif flag1 == "IGSG":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_igsg(img, model, attr_class, layers,
                              flag1, flag_read_attr=flag_read_attr,
                              iter_num=iter_num, SG_path=SG_path,
                              labels=labels, save_directory=root_directory)
        else:
            continue

        print_result_from_logit(logit_list, labels)
        for i_pos_neg in range(pos_flag):
            AM_list = AM_list_L[i_pos_neg]
            kept_channel_list = kept_channel_list_L[i_pos_neg]

            if debug_flag == 1:
                debug_show_AM_plus_img(AM_list, img, model)
            if i_pos_neg < 1:
                reverse_suffix = '_pos'
            else:
                reverse_suffix = '_neg'

            for layer_index, AM in enumerate(AM_list):
                layer = layers[layer_index]
                channel_attr = channel_attr_list[layer_index]
                kept_channel_index = kept_channel_list[layer_index]

                for factorization_method in factorization_methods:
                    spatial_factors, channel_factors, n_groups = \
                      decompose_AM_get_group_num(factorization_method, AM, thres_explained_var)

                    # if debug_flag == 1:
                    #   decompose_AM_with_UMAP(AM, n_groups)

                    channel_factors_max_index, channel_shap, short_index, long_index, \
                      n_groups = map_shap_attr_to_long(channel_factors, channel_attr, kept_channel_index)

                    # AM = np.squeeze(AM)
                    spatial_factors = weight_AM2spatial_factor(
                        AM, spatial_factors, n_groups, short_index,
                        kept_channel_index, channel_attr, i_pos_neg)

                    # If the attribution is negative, channel_shap should be multiplied by -1
                    if i_pos_neg > 0:
                        channel_shap = channel_shap * -1

                    # Sorting based on sum of Shapley values
                    ns_sorted = get_sort_groups_with_shap_scores(channel_shap)
                    every_group_attr_sorted, spatial_factors, channel_shap, n_groups =\
                      sort_groups_features(ns_sorted, spatial_factors, channel_shap, n_groups)

                    no_slash_layer_name = ''.join(layer.split('/'))
                    save_directory = create_factorization_dir(
                        root_directory, factorization_method,
                        no_slash_layer_name, reverse_suffix, n_groups)
                    gen_spatial_heat_maps(85, n_groups, spatial_factors,
                                          save_directory, attr_class,
                                          factorization_method,
                                          no_slash_layer_name, img, AM, model)
                    gen_info_txt(channel_shap, n_groups, save_directory,
                                 factorization_method, attr_class,
                                 every_group_attr_sorted)

                    # Use feature attributions times activation maps as the loss function to update the visualization images
                    channel_shap = channel_shap.astype("float32")
                    obj = sum(
                        utils.dot_attr_actmaps(layer, channel_shap[i], batch=i)
                        for i in range(n_groups))
                    '''
          For feature visualization, the library "lucid" is useful because
          it implements loss functions from several papers and image processing operators,
          and ships a collection of pretrained TensorFlow networks.
          '''
                    transforms = [
                        transform.pad(12),
                        transform.jitter(8),
                        transform.random_scale(
                            [n / 100. for n in range(80, 120)]),
                        transform.random_rotate(
                            list(range(-10, 10)) + list(range(-5, 5)) +
                            10 * list(range(-2, 2))),
                        transform.jitter(2)
                    ]

                    # image parameterization with shared params so the optimized images stay aligned
                    def interpolate_f():
                        unique = fft_image(
                            (n_groups, image_size, image_size, 3),
                            random_seed=vis_random_seed)
                        shared = [
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 2, image_size // 2, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 4, image_size // 4, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed)
                        ]
                        return utils.to_valid_rgb(unique + sum(shared),
                                                  decorrelate=True)

                    group_icons = render.render_vis(
                        model,
                        objective_f=obj,
                        param_f=interpolate_f,
                        transforms=transforms,
                        verbose=False,
                        relu_gradient_override=False)[-1]
                    save_imgs(group_icons, save_directory, attr_class,
                              factorization_method, no_slash_layer_name)
                    print("Layer {} and class {} -> finished".format(
                        layer, attr_class))
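The interpolate_f parameterization above adds a per-image fft_image component to low-resolution tensors shared across the batch, which keeps the optimized images aligned. A stripped-down sketch of the same idea using lucid's stock helpers (the example's utils.to_valid_rgb wrapper and the random_seed arguments are specific to that codebase and are omitted here):

from lucid.optvis.param.spatial import fft_image
from lucid.optvis.param.lowres import lowres_tensor
from lucid.optvis.param.color import to_valid_rgb

def aligned_param_f(batch=4, size=128):
    unique = fft_image((batch, size, size, 3))              # independent per image
    shared = lowres_tensor((batch, size, size, 3),
                           (1, size // 2, size // 2, 3))    # shared across the batch
    return to_valid_rgb(unique + shared, decorrelate=True)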
Example No. 28
import tensorflow as tf
from lucid.modelzoo.vision_base import Model
import warnings
import lucid.optvis.render as render
warnings.filterwarnings('ignore')
graph_file = 'nasnet_mobile_graphdef.pb'
graph_def = tf.GraphDef()
with open(graph_file, 'rb') as f:
    graph_def.ParseFromString(f.read())
for node in graph_def.node:
    print(node.name)


class NasNetMobile(Model):
    model_path = 'nasnet_mobile_graphdef_frozen.pb.modelzoo'
    image_shape = [224, 224, 3]
    image_value_range = (0, 1)
    input_name = 'input'


if __name__ == '__main__':
    nasnet = NasNetMobile()
    _ = render.render_vis(nasnet, 'cell_0/cell_output/concat:0')
Example No. 29
network = FrozenNetwork()
network.load_graphdef()

if LAYER == "-":
    height, width = 144, 144
    images = []
    layers = []
    for l in sys.stdin:
        layers.append(l.strip())

    for layer in layers:
        for i in range(COLUMNS):
            param_f = lambda: param.image(height, fft=True, decorrelate=True)
            obj = objectives.channel(layer, i)
            renders = render.render_vis(network,
                                        obj,
                                        param_f,
                                        thresholds=(2048, ))
            assert len(renders) == 1
            image = renders[0]
            assert len(image) == 1
            image = image[0]
            images.append(image)
    images = np.array(images)
    rows = len(layers)
    print(images.shape)
    assert images.shape == (rows * COLUMNS, height, width, 3)
    grid = (images.reshape(rows, COLUMNS, height, width,
                           3).swapaxes(1, 2).reshape(height * rows,
                                                     width * COLUMNS, 3))
    scipy.misc.imsave(OUTPUT_PREFIX + ".png", grid)
    sys.exit()
Example No. 30
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
#import lucid.optvis.optimizer as transfor
import tensorflow as tf
import matplotlib.pyplot as plt

model = models.InceptionV1()
model.load_graphdef()

neuron1 = ('mixed4b_pre_relu', 452)
C = lambda neuron: objectives.channel(*neuron)

out = render.render_vis(model, C(neuron1))
plt.imshow(out[0][0])

JITTER = 1
ROTATE = 5
SCALE = 1.1

transforms = [
    transform.pad(2 * JITTER),
    transform.jitter(JITTER),
    transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
    transform.random_rotate(range(-ROTATE, ROTATE + 1))
]

imgs = render.render_vis(model,
                         "mixed4b_pre_relu:452",