Example #1
def setup(opts):
    msg = '[SETUP] Ran with options: network = {}'
    print(msg.format(opts['network']))

    model = models.InceptionV1()
    model.load_graphdef()
    return model


def main():
    model = models.InceptionV1()
    model.load_graphdef()
    img = load(
        "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
    )
    act = ActivationGrid(model)
    # Even the "very naive" renderer takes some time to run; the less naive
    # version below recomputes the grid and overwrites `result`.
    result = act.render_activation_grid_very_naive(img, W=48, n_steps=1024)
    result = act.render_activation_grid_less_naive(img, W=48, n_steps=1024)
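

if __name__ == '__main__':
    # ActivationGrid is assumed to come from the surrounding project (it is not a
    # lucid class); this guard simply runs the demo when the file is executed directly.
    main()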
Example #3
    def __init__(self):
        super(InceptionV1Model, self).__init__()
        self._model = models.InceptionV1()
        self._model.load_graphdef()
        self._fprop_cache = dict()
        self._scope_cache = dict()

        x = self.make_input_placeholder()
        self._default_input_placeholder = x

        activation_maps = self.fprop(x)
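        # Per-layer score: spatial max over the H and W axes of the NHWC activation
        # maps, leaving one value per channel for every example in the batch.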
        self.activation_scores = {
            layer: tf.math.reduce_max(activation_maps[layer], axis=[1, 2])
            for layer in self.LAYERS
        }

    def __init__(self,
                 content,
                 overlap_px=10,
                 repeat=2,
                 num_strokes=5,
                 painter_type="GAN",
                 connected=True,
                 alternate=True,
                 bw=False,
                 learning_rate=0.1,
                 gpu_mode=True,
                 graph=None):
        self.overlap_px = overlap_px
        self.repeat = repeat
        self.full_size = 64 * repeat - overlap_px * (repeat - 1)
        self.unrepeated_num_strokes = num_strokes
        self.num_strokes = num_strokes * self.repeat**2
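        # With the defaults (repeat=2, overlap_px=10, num_strokes=5):
        # full_size = 64 * 2 - 10 * 1 = 118 and num_strokes = 5 * 2**2 = 20.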
        self.painter_type = painter_type
        self.connected = connected
        self.alternate = alternate
        self.bw = bw
        print('full_size', self.full_size, 'max_seq_len', self.num_strokes)

        self.content = content
        self.inception_v1 = models.InceptionV1()
        self.inception_v1.load_graphdef()
        transforms = [
            transform.pad(12, mode='constant', constant_value=.5),
            transform.jitter(8),
            transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
            transform.random_rotate(list(range(-5, 5)) + 5 * [0]),
            transform.jitter(4),
        ]

        self.transform_f = render.make_transform_f(transforms)

        self.optim = render.make_optimizer(
            tf.train.AdamOptimizer(learning_rate), [])

        self.gpu_mode = gpu_mode
        if not gpu_mode:
            with tf.device('/cpu:0'):
                tf.logging.info('Model using cpu.')
                self._build_graph(graph)
        else:
            #tf.logging.info('Model using gpu.')
            self._build_graph(graph)
        self._init_session()
Example #5
def get_lucid_orig_image_losses(args):
    #import lucid.modelzoo.vision_models as models
    #model = models.InceptionV1()
    #model.load_graphdef()
    #param_f = lambda: param.image(128, batch=4)
    #obj = objectives.channel("mixed5a", 9) - 1e2*objectives.diversity("mixed5a")
    #obj = objectives.channel("mixed5a", 9)
    #all_images = render.render_vis(model, obj, param_f)
    #image = all_images[0]

    global USE_ORG_IMPORT_MODEL
    USE_ORG_IMPORT_MODEL = True
    import lucid.modelzoo.vision_models as models
    model = models.InceptionV1()
    model.load_graphdef()
    layer = "mixed5a"
    num_units = 16
    param_f = lambda: param.image(224, batch=num_units)
    obj = objectives.channel(layer, 0, 0)
    for idx in range(1, num_units):
        obj += objectives.channel(layer, idx, idx)
    image, all_losses = render_vis(model, obj, param_f)
    return layer, image, all_losses
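
# Hedged usage sketch (the argument object, the `save` helper from lucid.misc.io,
# and the output layout of the local render_vis wrapper are all assumptions here):
# layer, image, all_losses = get_lucid_orig_image_losses(args)
# save(image[0], '%s_units.png' % layer)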
Example #6
    # Load output
    # fne = np.load('fne.npy')
    # fne_stats = np.load('stats.npy')

    # Return
    return features, stats


if __name__ == '__main__':
    # This shows an example of calling the full_network_embedding method using
    # the InceptionV1 architecture pretrained on ILSVRC2012 (aka ImageNet), as
    # provided by the tensorflow lucid package. Using any other pretrained CNN
    # model is straightforward.

    # Load model
    model = models.InceptionV1()
    model.load_graphdef()

    # Define input and target tensors
    input_tensor = 'import/input:0'
    target_tensors = ['import/mixed3a:0', 'import/mixed3b:0', 'import/mixed4a:0', 'import/mixed4b:0',
                      'import/mixed4c:0', 'import/mixed4d:0', 'import/mixed4e:0', 'import/mixed5a:0',
                      'import/mixed5b:0']

    # Define images to process
    image_paths = ['../images/img1.jpg', '../images/img2.jpg', '../images/img3.jpg']
    batch_size = 2
    input_reshape = (224, 224)

    # Call FNE method
    fne_features, fne_stats = full_network_embedding(model.graph_def, image_paths, batch_size, input_tensor,
                                                     target_tensors, input_reshape)  # trailing args assumed from the variables defined above
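
    # Hedged follow-up, mirroring the commented-out np.load calls earlier in this
    # example (numpy is assumed to be imported as np in the full file): persist the
    # embedding and its statistics so they can be reloaded later.
    np.save('fne.npy', fne_features)
    np.save('stats.npy', fne_stats)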
Example #7
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show, load
from lucid.misc.io.showing import _image_url, _display_html

import sys
sys.path.insert(0, '../src')
# import unittest
from attribution import ChannelAttribution

model = models.InceptionV1()  # this is GoogLeNet
model.load_graphdef()


def compare_attr_methods(attr, img, class1, class2):
    _display_html("<h2>Linear Attribution</h2>")
    attr.channel_attr(img, "mixed4d", class1, class2, mode="simple", n_show=10)

    _display_html("<br><br><h2>Path Integrated Attribution</h2>")
    attr.channel_attr(img, "mixed4d", class1, class2, mode="path", n_show=10)

    _display_html("<br><br><h2>Stochastic Path Integrated Attribution</h2>")
    attr.channel_attr(img,
                      "mixed4d",
                      class1,
                      class2,
                      mode="path",
                      n_show=10,
                      stochastic_path=True)


attr = ChannelAttribution(model)
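
# Hedged usage sketch: drive the comparison defined above. The image URL and the two
# ImageNet class names are illustrative choices, not part of the original snippet.
img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
)
compare_attr_methods(attr, img, "Labrador retriever", "tiger cat")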
Example #8
}


def googlenet_spritemap(layer):
    assert layer in layer_spritemap_sizes
    size = layer_spritemap_sizes[layer]
    url = "https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg" % layer
    return size, url


model = chestnet()
model.load_graphdef()

print(model)

googlenet = models.InceptionV1()
googlenet.load_graphdef()


def googlenet_semantic_dict(layer, imgName):
    img = PIL.Image.open(imgName)

    img = img.resize((224, 224), PIL.Image.ANTIALIAS)

    # Compute the activations
    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder(tf.float32, [224, 224, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval({t_input: img})[0]

    # Find the most interesting position for our initial view
Example #9
    def textureStyle3D(self):
        print("Importing tensorflow...")
        import tensorflow as tf

        print("Checking that GPU is visible for tensorflow...")
        if not tf.test.is_gpu_available():
            raise Exception("No GPU available for tensorflow!")

        print("Importing other libraries...")
        import os
        import io
        import sys
        from string import Template
        from pathlib import Path

        import numpy as np
        import PIL.Image
        # import matplotlib.pylab as pl

        from IPython.display import clear_output, display, Image, HTML

        # if os.name != 'nt':
        #     from lucid.misc.gl.glcontext import create_opengl_context
        import OpenGL.GL as gl

        from lucid.misc.gl import meshutil
        from lucid.misc.gl import glrenderer
        import lucid.misc.io.showing as show
        import lucid.misc.io as lucid_io
        from lucid.misc.tfutil import create_session

        from lucid.modelzoo import vision_models
        from lucid.optvis import objectives
        from lucid.optvis import param
        from lucid.optvis.style import StyleLoss, mean_l1_loss
        from lucid.optvis.param.spatial import sample_bilinear

        # if os.name != 'nt':
        #     print("Creating OpenGL context...")
        #     create_opengl_context()
        gl.glGetString(gl.GL_VERSION)

        print("Loading vision model...")
        model = vision_models.InceptionV1()
        model.load_graphdef()

        def prepare_image(fn, size=None):
            data = lucid_io.reading.read(fn)
            im = PIL.Image.open(io.BytesIO(data)).convert('RGB')
            if size:
                im = im.resize(size, PIL.Image.ANTIALIAS)
            return np.float32(im) / 255.0

        self.loadCameras()

        print("Loading input model from '{}'...".format(self.input_model_path))
        mesh = meshutil.load_obj(self.input_model_path)
        if self.cameras is None:
            mesh = meshutil.normalize_mesh(mesh)

        print("Loading input texture from '{}'...".format(
            self.input_texture_path))
        original_texture = prepare_image(
            self.input_texture_path, (self.texture_size, self.texture_size))

        print("Loading style from '{}'...".format(self.style_path))
        style = prepare_image(self.style_path)

        rendering_width = self.rendering_width
        rendering_height = int(rendering_width // self.aspect_ratio)

        print("Creating renderer with resolution {}x{}...".format(
            rendering_width, rendering_height))
        renderer = glrenderer.MeshRenderer((rendering_width, rendering_height))
        if self.cameras is not None:
            print("  renderer fovy: {:.2f} degrees".format(self.max_fovy))
            renderer.fovy = self.max_fovy

        sess = create_session(timeout_sec=0)

        # t_fragments is used to feed rasterized UV coordinates for the current view.
        # Channels: [U, V, _, Alpha]. Alpha is 1 for pixels covered by the object, and
        # 0 for background.
        t_fragments = tf.placeholder(tf.float32, [None, None, 4])
        t_uv = t_fragments[..., :2]
        t_alpha = t_fragments[..., 3:]

        # Texture atlas to optimize
        t_texture = param.image(self.texture_size, fft=True,
                                decorrelate=True)[0]

        # Variable to store the original mesh texture used to render content views
        content_var = tf.Variable(tf.zeros(
            [self.texture_size, self.texture_size, 3]),
                                  trainable=False)

        # Sample current and original textures with provided pixel data
        t_joined_texture = tf.concat([t_texture, content_var], -1)
        t_joined_frame = sample_bilinear(t_joined_texture, t_uv) * t_alpha
        t_frame_current, t_frame_content = t_joined_frame[
            ..., :3], t_joined_frame[..., 3:]
        t_joined_frame = tf.stack([t_frame_current, t_frame_content], 0)

        # Feeding the rendered frames to the Neural Network
        t_input = tf.placeholder_with_default(t_joined_frame,
                                              [None, None, None, 3])
        model.import_graph(t_input)

        # style loss
        style_layers = [
            sess.graph.get_tensor_by_name('import/%s:0' % s)[0]
            for s in self.googlenet_style_layers
        ]
        # L1-loss seems to be more stable for GoogleNet
        # Note that we use style_decay>0 to average style-describing Gram matrices
        # over the recent viewports. Please refer to StyleLoss for the details.
        sl = StyleLoss(style_layers, self.style_decay, loss_func=mean_l1_loss)

        # content loss
        content_layer = sess.graph.get_tensor_by_name(
            'import/%s:0' % self.googlenet_content_layer)
        content_loss = mean_l1_loss(content_layer[0],
                                    content_layer[1]) * self.content_weight

        # setup optimization
        total_loss = content_loss + sl.style_loss
        t_lr = tf.constant(0.05)
        trainer = tf.train.AdamOptimizer(t_lr)
        train_op = trainer.minimize(total_loss)

        init_op = tf.global_variables_initializer()
        loss_log = []

        def reset(style_img, content_texture):
            del loss_log[:]
            init_op.run()
            sl.set_style({t_input: style_img[None, ...]})
            content_var.load(content_texture)

        def sample_random_view():
            if self.cameras is None:
                return meshutil.sample_view(10.0, 12.0)
            else:
                rand_m = self.cameras[np.random.randint(0, len(
                    self.cameras))]["transformToCamera"].copy()
                return rand_m

        def run(mesh, step_n=400):
            app = QtWidgets.QApplication.instance()

            for i in range(step_n):
                fragments = renderer.render_mesh(
                    modelview=sample_random_view(),
                    position=mesh['position'],
                    uv=mesh['uv'],
                    face=mesh['face'])
                _, loss = sess.run([train_op, [content_loss, sl.style_loss]],
                                   {t_fragments: fragments})
                loss_log.append(loss)
                if i == 0 or (i + 1) % 50 == 0:
                    # clear_output()
                    last_frame, last_content = sess.run(
                        [t_frame_current, t_frame_content],
                        {t_fragments: fragments})
                    # show.images([last_frame, last_content], ['current frame', 'content'])
                if i == 0 or (i + 1) % 10 == 0:
                    print(len(loss_log), loss)
                    pass

                # Show progress
                self.pBar.setValue(
                    (i + step_n // 10 + 1) / (step_n + step_n // 10) * 100)
                app.processEvents()

        reset(style, original_texture)

        print("Running {} iterations...".format(self.steps_number))
        run(mesh, step_n=self.steps_number)

        print("Finished!")
        texture = t_texture.eval()
        print("Exporting result texture to '{}'...".format(
            self.output_texture_path))
        lucid_io.save(texture, self.output_texture_path, quality=90)

        sess.close()

        print("Importing result model to Metashape '{}'...".format(
            self.result_model_path))
        chunk.model = None
        chunk.importModel(self.result_model_path)
        chunk.model.label = self.style_name

        Metashape.app.messageBox(
            "Everything worked fine!\n"
            "Please save project and RESTART Metashape!\n"
            "Because video memory was not released by TensorFlow!")
def setup(opts):
    model = models.InceptionV1()
    model.load_graphdef()
    return model
Example #11
def main():
    '''
    Parse the command-line arguments, then compute the I-matrix for the given
    layer over the ImageNet tf-records and save it as JSON.
    '''
    args = parse_args()
    layer = args.layer
    gpu = args.gpu
    batch = args.batch

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)

    googlenet = models.InceptionV1()
    googlenet.load_graphdef()
    nodes = googlenet.graph_def.node

    # filenames = glob.glob('/media/fred/strawberry/imagenet-tf-records/*')
    # filenames = glob.glob('test-images/imagenet-tf-records/*')
    filenames = glob.glob('/raid/hpark407/imagenet-tf-records/*')
    I_mat_dirpath = '/raid/fhohman3/I-mat/'
    # chain_dirpath = './chain/'

    num_class = 1000
    all_layers = get_layers(nodes)
    mixed_layers = [layer for layer in all_layers if 'mixed' in layer]

    layers = {
        'mixed3a': 256,
        'mixed3b': 480,
        'mixed4a': 508,
        'mixed4b': 512,
        'mixed4c': 512,
        'mixed4d': 528,
        'mixed4e': 832,
        'mixed5a': 832,
        'mixed5b': 1024
    }

    layer_fragment_sizes = {
        layer: get_channel_sizes(layer, nodes)
        for layer in mixed_layers
    }
    weight_sizes = get_weight_sizes(nodes, all_layers)
    act_sizes = get_act_sizes(weight_sizes, mixed_layers)

    k = 5
    # chain_k = 3
    mixed_layer = layer.split('_')[0]
    prev_layer = get_prev_layer(all_layers, mixed_layer)

    a_sz = act_sizes[mixed_layer]
    f_sz = layer_fragment_sizes[mixed_layer]

    frag_sz = [f_sz[0], f_sz[1], f_sz[2], f_sz[3], a_sz[1], a_sz[2]]

    outlier_nodes = [
        'mixed3a-67', 'mixed3a-190', 'mixed3b-390', 'mixed3b-399',
        'mixed3b-412'
    ]
    outlier_nodes_idx = [
        int(n.split('-')[1]) for n in outlier_nodes if layer in n
    ]

    # Get top impactful previous neurons and generate I-matrices
    # Get layer info
    is_mixed = '_' not in layer
    branch = None if is_mixed else int(layer.split('_')[-1])

    # Initialize I
    num_channel = layers[layer] if is_mixed else act_sizes[layer[:-2]][branch]
    I_layer = gen_empty_I(num_class, num_channel)

    # Run
    with tf.Graph().as_default():
        # Get dataset
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(_parse_function)
        dataset = dataset.map(lambda img, lab, syn:
                              (preprocess_input(img), lab, syn))
        dataset = dataset.batch(batch)

        # Iterate tf-records
        iterator = dataset.make_one_shot_iterator()
        t_preprocessed_images, t_labels, t_synsets = iterator.get_next()

        # Import googlenet
        T = render.import_model(googlenet, t_preprocessed_images, None)

        # Get weight tensors
        t_w0, t_w1, t_w2, t_w3, t_w4, t_w5 = get_weight_tensors(mixed_layer)

        # Get intermediate layer tensors
        t_a0, t_a1, t_a2 = get_intermediate_layer_tensors(
            prev_layer, mixed_layer)

        # Define intermediate conv output tensors
        t_inf_0 = get_infs(t_a0, t_w0)
        t_inf_1 = get_infs(t_a1, t_w2)
        t_inf_2 = get_infs(t_a2, t_w4)
        t_inf_3 = get_infs(t_a0, t_w5)
        t_inf_4 = get_infs(t_a0, t_w1)
        t_inf_5 = get_infs(t_a0, t_w3)

        # Run with batch
        progress_counter = 0
        with tf.Session() as sess:
            start = time.time()

            try:
                with tqdm.tqdm(total=1281167, unit='imgs') as pbar:

                    while (True):
                        progress_counter += 1

                        # Run the session
                        if is_mixed:
                            labels, inf_0, inf_1, inf_2, inf_3 = sess.run(
                                [t_labels, t_inf_0, t_inf_1, t_inf_2, t_inf_3])

                        elif branch == 1:
                            labels, inf_4 = sess.run([t_labels, t_inf_4])

                        elif branch == 2:
                            labels, inf_5 = sess.run([t_labels, t_inf_5])

                        # Add up the counts
                        if is_mixed:
                            channel = 0
                            for frag, inf in enumerate(
                                [inf_0, inf_1, inf_2, inf_3]):
                                channel = update_I(layer, inf, channel,
                                                   I_layer, labels,
                                                   frag_sz[frag], k,
                                                   outlier_nodes_idx)

                        elif branch == 1:
                            update_I(layer, inf_4, 0, I_layer, labels,
                                     frag_sz[4], k, outlier_nodes_idx)

                        elif branch == 2:
                            update_I(layer, inf_5, 0, I_layer, labels,
                                     frag_sz[5], k, outlier_nodes_idx)

                        pbar.update(len(labels))
                        # print(inf_0.shape, inf_1.shape, inf_2.shape, inf_3.shape, inf_4.shape, inf_5.shape)

            except tf.errors.OutOfRangeError:
                pass

            # Save I_layer
            with open(I_mat_dirpath + 'I_%s.json' % layer, 'w') as f:
                json.dump(I_layer, f, indent=2)

            end = time.time()
            print(end - start)
            print(progress_counter)
            print(progress_counter * batch)
Example #12
def index():
    #     return 'Hello World!'
    model = vision_models.InceptionV1()
    model.load_graphdef()
    TEXTURE_SIZE = 1024
    mesh = meshutil.load_obj('article_models/bunny.obj')
    mesh = meshutil.normalize_mesh(mesh)
    original_texture = prepare_image('article_models/bunny.png',
                                     (TEXTURE_SIZE, TEXTURE_SIZE))
    style_url = 'https://upload.wikimedia.org/wikipedia/commons/d/db/RIAN_archive_409362_Literaturnaya_Gazeta_article_about_YuriGagarin%2C_first_man_in_space.jpg'
    style = prepare_image(style_url)
    renderer = glrenderer.MeshRenderer((512, 512))
    googlenet_style_layers = [
        'conv2d2',
        'mixed3a',
        'mixed3b',
        'mixed4a',
        'mixed4b',
        'mixed4c',
    ]
    googlenet_content_layer = 'mixed3b'
    content_weight = 100.0
    # Style Gram matrix weighted average decay coefficient
    style_decay = 0.95

    sess = create_session(timeout_sec=0)

    # t_fragments is used to feed rasterized UV coordinates for the current view.
    # Channels: [U, V, _, Alpha]. Alpha is 1 for pixels covered by the object, and
    # 0 for background.
    t_fragments = tf.placeholder(tf.float32, [None, None, 4])
    t_uv = t_fragments[..., :2]
    t_alpha = t_fragments[..., 3:]

    # Texture atlas to optimize
    t_texture = param.image(TEXTURE_SIZE, fft=True, decorrelate=True)[0]

    # Variable to store the original mesh texture used to render content views
    content_var = tf.Variable(tf.zeros([TEXTURE_SIZE, TEXTURE_SIZE, 3]),
                              trainable=False)

    # Sample current and original textures with provided pixel data
    t_joined_texture = tf.concat([t_texture, content_var], -1)
    t_joined_frame = sample_bilinear(t_joined_texture, t_uv) * t_alpha
    t_frame_current, t_frame_content = t_joined_frame[..., :3], t_joined_frame[
        ..., 3:]
    t_joined_frame = tf.stack([t_frame_current, t_frame_content], 0)

    # Feeding the rendered frames to the Neural Network
    t_input = tf.placeholder_with_default(t_joined_frame,
                                          [None, None, None, 3])
    model.import_graph(t_input)

    # style loss
    style_layers = [
        sess.graph.get_tensor_by_name('import/%s:0' % s)[0]
        for s in googlenet_style_layers
    ]
    # L1-loss seems to be more stable for GoogleNet
    # Note that we use style_decay>0 to average style-describing Gram matrices
    # over the recent viewports. Please refer to StyleLoss for the details.
    sl = StyleLoss(style_layers, style_decay, loss_func=mean_l1_loss)

    # content loss
    content_layer = sess.graph.get_tensor_by_name('import/%s:0' %
                                                  googlenet_content_layer)
    content_loss = mean_l1_loss(content_layer[0],
                                content_layer[1]) * content_weight

    # setup optimization
    total_loss = content_loss + sl.style_loss
    t_lr = tf.constant(0.05)
    trainer = tf.train.AdamOptimizer(t_lr)
    train_op = trainer.minimize(total_loss)

    init_op = tf.global_variables_initializer()
    loss_log = []

    reset(style, original_texture)
    run(mesh)
    texture = t_texture.eval()
    return show.textured_mesh(mesh, texture)
def main():
    # Import a model from the lucid modelzoo, or convert a tensorflow slim model
    # from https://github.com/tensorflow/models/tree/master/research/slim.
    # The lucid library downloads the model automatically.
    model = models.InceptionV1()
    model.load_graphdef()

    # labels_str = read("https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt")
    # labels_str = labels_str.decode("utf-8")
    # labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
    # labels = [label[label.find(" "):].strip().replace("_", " ") for label in labels]
    # labels = ["dummy"] + labels
    labels_str = read(model.labels_path)
    labels_str = labels_str.decode("utf-8")
    labels = [line for line in labels_str.split("\n")]

    # layers = ["InceptionV1/Mixed_5c/concat", "InceptionV1/Mixed_5b/concat",
    #                 "InceptionV1/Mixed_4f/concat", "InceptionV1/Mixed_4e/concat",
    #                 "InceptionV1/Mixed_4d/concat", "InceptionV1/Mixed_4c/concat",
    #                 "InceptionV1/Mixed_4b/concat", "InceptionV1/Mixed_3b/concat",
    #                 "InceptionV1/Conv2d_2b_1x1/Relu", "InceptionV1/MaxPool_3a_3x3/MaxPool",
    #                 ]
    # layers = ["mixed5b", "mixed5a", "mixed4e", "mixed4d", "mixed4c",
    #           "mixed4b", "mixed4a", "mixed3b", "mixed3a", "maxpool1"]

    # factorization_methods = ['DictionaryLearning', 'FactorAnalysis', 'FastICA', 'IncrementalPCA',
    #                          'LatentDirichletAllocation', 'MiniBatchDictionaryLearning',
    #                          'MiniBatchSparsePCA', 'NMF', 'PCA', 'SparsePCA',
    #                          'TruncatedSVD']
    # factorization_methods = ['KernelPCA', 'SparseCoder', 'dict_learning', 'dict_learning_online', 'fastica']

    layers = ["mixed4d"]
    factorization_methods = ['FactorAnalysis']

    # attr_classes = ['Egyptian cat', 'golden retriever']
    # attr_classes = ['laptop', 'quilt']  # ['Labrador retriever', 'tennis ball', 'tiger cat']
    # attr_classes = ['tiger cat', 'Labrador retriever']
    # ('Labrador retriever', 'golden retriever')
    # [11.319051   9.532383]
    # ('Labrador retriever', 'golden retriever')
    # [8.349452  8.214619 ]
    attr_classes = ['golden retriever']

    global_random_seed = 5
    image_size = 224  # 224

    # whether load the pre-computed feature attribution
    flag_read_attr = True
    # Shapley value computing method, "Shap" or "IGSG"
    flag1 = "IGSG"
    # iteration times for computing Shapley values
    iter_num = 100
    # pos_flag = 1: compute only positive Shapley values;
    # pos_flag = 2: consider both positive and negative Shapley values
    pos_flag = 2
    img_name = "./data/dog_cat224.jpg"

    # ---------------------------------------------------------------------------------------------------
    neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes=attr_classes,
                  factorization_methods=factorization_methods,
                  flag1=flag1,
                  flag_read_attr=flag_read_attr,
                  iter_num=iter_num,
                  SG_path=False,
                  labels=labels,
                  pos_flag=pos_flag,
                  thres_explained_var=0.7,
                  vis_random_seed=global_random_seed,
                  image_size=image_size)
Example #14
from scipy.misc import imsave

import tensorflow as tf
from lucid.modelzoo import vision_models
from lucid.misc.io import show, save, load
from lucid.optvis import objectives
from lucid.optvis import render
from lucid.misc.tfutil import create_session
from lucid.optvis.param import cppn
import lucid.optvis.param as param

from lucid.optvis.param import spatial, color, lowres

print("Loading model")
model = vision_models.InceptionV1()
model.load_graphdef()

batch_size = 6

# Idea, after first training, try to reload the weights and only copy over the top layers...

# TESTED: lower size_n and fewer training steps still work, at lower quality
#size_n = 200
#starting_training_steps = 2**10

size_n = 100
starting_training_steps = 2**9

#from skimage.transform import resize
#target_img = load("results/images/mixed4a_3x3_pre_relu_25.png")
Example #15
def main():
    googlenet = models.InceptionV1()
    googlenet.load_graphdef()
    sd = SemanticDict(googlenet)
    sd.create_semantic_dict("mixed4d", "dog_cat.png")
Example #16
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show, load
from lucid.misc.io.reading import read
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
from lucid.misc.gradient_override import gradient_override_map
import cv2

model = models.InceptionV1()
model.load_graphdef()

# def raw_class_spatial_attr(img, layer, label, override=None):
#     """How much did spatial positions at a given layer effect a output class?"""
#
#     # Set up a graph for doing attribution...
#     with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
#         t_input = tf.placeholder_with_default(img, [None, None, 3])
#         T = render.import_model(model, t_input, t_input)
#
#         # Compute activations
#         acts = T(layer).eval()
#
#         if label is None: return np.zeros(acts.shape[1:-1])
#
#         # Compute gradient