Example #1
def feature_inversion(img, model, layer=None, n_steps=512, cossim_pow=0.0):
  with tf.Graph().as_default(), tf.Session() as sess:
    img = imgToModelSize(img, model)
    
    objective = objectives.Objective.sum([
        1.0 * dot_compare(layer, cossim_pow=cossim_pow),
        objectives.blur_input_each_step(),
    ])

    t_input = tf.placeholder(tf.float32, img.shape)
    param_f = param.image(img.shape[0], decorrelate=True, fft=True, alpha=False)
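    # Stack the optimizable image (batch 0) with the fixed input image (batch 1) so the
    # dot_compare objective can compare their activations at `layer`.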
    param_f = tf.stack([param_f[0], t_input])

    transforms = [
      transform.pad(8, mode='constant', constant_value=.5),
      transform.jitter(8),
      transform.random_scale([0.9, 0.95, 1.05, 1.1] + [1]*4),
      transform.random_rotate(list(range(-5, 5)) + [0]*5),
      transform.jitter(2),
    ]

    T = render.make_vis_T(model, objective, param_f, transforms=transforms)
    loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")

    tf.global_variables_initializer().run()
    for i in range(n_steps): _ = sess.run([vis_op], {t_input: img})

    result = t_image.eval(feed_dict={t_input: img})
    show(result[0])
Example #2
    def render_atlas_tile(model, op_name, directions, icon_size=45, n_steps=127, transforms_amount=1, cossim_pow=0, L2_amount=2):
        transforms_options = [
            [
                transform.jitter(2)
            ],
            [
                transform.pad(12, mode="constant", constant_value=.5),
                transform.jitter(8),
                transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
                transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
                transform.jitter(4),
            ],
            [
                transform.pad(2, mode='constant', constant_value=.5),
                transform.jitter(4),
                transform.jitter(4),
                transform.jitter(8),
                transform.jitter(8),
                transform.jitter(8),
                transform.random_scale([0.995**n for n in range(-5,80)] + [0.998**n for n in 2*list(range(20,40))]),
                transform.random_rotate(list(range(-20,20))+list(range(-10,10))+list(range(-5,5))+5*[0]),
                transform.jitter(2),
            ],
        ]
        
        param_f = lambda: param.image(icon_size, batch=directions.shape[0])
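        # One direction_neuron objective per direction vector, each assigned its own batch
        # index; the squared L2 term below keeps pixel values close to 0.5 (gray).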
        obj = objectives.Objective.sum(
          [objectives.direction_neuron(op_name, v, batch=n, cossim_pow=cossim_pow)
           for n,v in enumerate(directions)
          ]) - L2_amount * objectives.L2("input", 0.5) * objectives.L2("input", 0.5)
        thresholds=(n_steps//2, n_steps)

        vis_imgs = render.render_vis(model, obj, param_f, transforms=transforms_options[transforms_amount], thresholds=thresholds, verbose=False)[-1]

        return vis_imgs
Example #3
def generate(model, args):
    print('[GENERATE] Ran with layer {} and neuron {}'.format(
        args['layer'], args['neuron']))

    layer_id = args['layer'].split(' ')[0]
    layer_neuron = '{}:{}'.format(layer_id, args['neuron'])

    s = int(args['size'])
    min_scale = args['transform_min']
    max_scale = args['transform_max']
    scale_offset = (max_scale - min_scale) * 10

    # https://github.com/tensorflow/lucid/issues/148
    with tf.Graph().as_default() as graph, tf.Session() as sess:

        t_img = param.image(s)
        crop_W = int(s / 2)
        t_offset = tf.random.uniform((2, ), 0, s - crop_W, dtype="int32")
        t_img_crop = t_img[:, t_offset[0]:t_offset[0] + crop_W,
                           t_offset[1]:t_offset[1] + crop_W]
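        # Optimize through a randomly offset half-size crop of the parameterized image;
        # a fresh offset is drawn each time the graph is evaluated.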

        if args['transforms']:
            transforms = [
                transform.jitter(2),
                transform.random_scale(
                    [min_scale + n / 10. for n in range(20)]),
                transform.random_rotate(range(-10, 11)),
                transform.jitter(2)
            ]

            T = render.make_vis_T(model,
                                  layer_neuron,
                                  t_img_crop,
                                  transforms=transforms)
        else:
            T = render.make_vis_T(model, layer_neuron, t_img_crop)

        tf.global_variables_initializer().run()

        for i in range(1024):
            T("vis_op").run()

        img = t_img.eval()[0]

    # https://github.com/tensorflow/lucid/issues/108
    # img = render.render_vis(model, layer_neuron)[-1][0]
    img = Image.fromarray(np.uint8(img * 255))
    return {'image': img}
Example #4
    def __init__(self,
                 content,
                 overlap_px=10,
                 repeat=2,
                 num_strokes=5,
                 painter_type="GAN",
                 connected=True,
                 alternate=True,
                 bw=False,
                 learning_rate=0.1,
                 gpu_mode=True,
                 graph=None):
        self.overlap_px = overlap_px
        self.repeat = repeat
        self.full_size = 64 * repeat - overlap_px * (repeat - 1)
        self.unrepeated_num_strokes = num_strokes
        self.num_strokes = num_strokes * self.repeat**2
        self.painter_type = painter_type
        self.connected = connected
        self.alternate = alternate
        self.bw = bw
        print('full_size', self.full_size, 'max_seq_len', self.num_strokes)

        self.content = content
        self.inception_v1 = models.InceptionV1()
        self.inception_v1.load_graphdef()
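        # Standard lucid robustness transforms (pad, jitter, scale, rotate); transform_f is
        # built below and presumably applied to the rendered canvas inside _build_graph.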
        transforms = [
            transform.pad(12, mode='constant', constant_value=.5),
            transform.jitter(8),
            transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
            transform.random_rotate(list(range(-5, 5)) + 5 * [0]),
            transform.jitter(4),
        ]

        self.transform_f = render.make_transform_f(transforms)

        self.optim = render.make_optimizer(
            tf.train.AdamOptimizer(learning_rate), [])

        self.gpu_mode = gpu_mode
        if not gpu_mode:
            with tf.device('/cpu:0'):
                tf.logging.info('Model using cpu.')
                self._build_graph(graph)
        else:
            #tf.logging.info('Model using gpu.')
            self._build_graph(graph)
        self._init_session()
Example #5
def feature_inversion(img,
                      model,
                      layer,
                      n_steps=512,
                      cossim_pow=0.0,
                      verbose=True):
    if isinstance(layer, str):
        layers = [layer]
    elif isinstance(layer, (tuple, list)):
        layers = layer
    else:
        raise TypeError("layer must be str, tuple or list")

    with tf.Graph().as_default(), tf.Session() as sess:
        img = imgToModelSize(img, model)

        objective = objectives.Objective.sum([
            1.0 * dot_compare(layer, cossim_pow=cossim_pow, batch=i + 1)
            for i, layer in enumerate(layers)
        ])

        t_input = tf.placeholder(tf.float32, img.shape)
        param_f = param.image(img.shape[0],
                              decorrelate=True,
                              fft=True,
                              alpha=False,
                              batch=len(layers))
        param_f = tf.concat([t_input[None], param_f], 0)
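        # Batch 0 holds the fixed reference image; batches 1..len(layers) are the optimized
        # images, one per target layer (matching the batch indices used in dot_compare above).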

        transforms = [
            transform.pad(8, mode='constant', constant_value=.5),
            transform.jitter(8),
            transform.random_scale([0.9, 0.95, 1.05, 1.1] + [1] * 4),
            transform.random_rotate(list(range(-5, 5)) + [0] * 5),
            transform.jitter(2),
        ]

        T = render.make_vis_T(model, objective, param_f, transforms=transforms)
        loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")

        tf.global_variables_initializer().run()
        for i in range(n_steps):
            _ = sess.run([vis_op], {t_input: img})

        result = t_image.eval(feed_dict={t_input: img})
        if verbose:
            lucid.misc.io.showing.images(result[1:], layers)
        return result
Example #6
    def vis_traditional(
        self,
        feature_list=None,
        *,
        transforms=[transform.jitter(2)],
        l2_coeff=0.0,
        l2_layer_name=None,
    ):
        if feature_list is None:
            feature_list = list(range(self.acts_reduced.shape[-1]))
        try:
            feature_list = list(feature_list)
        except TypeError:
            feature_list = [feature_list]

        obj = sum([
            objectives.direction_neuron(self.layer_name,
                                        self.channel_dirs[feature],
                                        batch=feature)
            for feature in feature_list
        ])
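        # Each requested feature is optimized in its own batch slot, so all feature
        # visualizations are rendered jointly in a single render_vis call.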
        if l2_coeff != 0.0:
            assert (
                l2_layer_name is not None
            ), "l2_layer_name must be specified if l2_coeff is non-zero"
            obj -= objectives.L2(l2_layer_name) * l2_coeff
        param_f = lambda: param.image(64, batch=len(feature_list))
        return render.render_vis(self.model,
                                 obj,
                                 param_f=param_f,
                                 transforms=transforms)[-1]
Example #7
def visualize_neuron(algo='apex',
                     env='SeaquestNoFrameskip-v4',
                     run_id=1,
                     tag="final",
                     param_f=lambda: image([1, 84, 84, 4]),
                     do_render=False,
                     transforms=[
                         transform.jitter(3),
                     ],
                     layer_no=0,
                     neuron=0,
                     regularization=0,
                     **params):
    tf.reset_default_graph()

    m = MakeAtariModel(algo, env, run_id, tag, local=False)()
    m.load_graphdef()
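    # Dense layers use lucid's standard channel objective; other layers go through a
    # separate `channel` helper that also takes the model's channel ordering into account.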

    if m.layers[layer_no]['type'] == 'dense':
        obj = objectives.channel(m.layers[layer_no]['name'], neuron)
    else:
        obj = channel(m.layers[layer_no]['name'],
                      neuron,
                      ordering=m.channel_order)

    out = optimize_input(obj + regularization,
                         m,
                         param_f,
                         transforms,
                         do_render=do_render,
                         **params)
    return out
Example #8
def get_transforms(complexity=0):
    if complexity == 4:
        return [
            transform.pad(32),
            transform.jitter(64),
            transform.random_scale([n / 100. for n in range(80, 120)]),
            transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
            #transform.random_rotate( list( range( -10, 10 ) ) + list( range( -5, 5 ) ) + 10 * list( range( -2, 2 ) ) ),
            transform.jitter(8)
        ]

    if complexity == 3:
        return [
            transform.pad(16),
            transform.jitter(16),
            transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
            transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
            transform.jitter(8)
        ]

    if complexity == 2:
        return transform.standard_transforms

    if complexity == 1:
        # no rotations
        return [
            transform.pad(16),
            transform.jitter(32),
            transform.random_scale([n / 100. for n in range(80, 120)]),
            transform.jitter(2)
        ]

    else:
        # no transforms
        return []
Example #9
imgs = render.render_vis(
    model,
    objective,
    optimizer=optimizer,
    transforms=[],
    param_f=lambda: param.image(64, fft=False, decorrelate=False),
    thresholds=thresholds,
    verbose=True)

fig([imgs])

JITTER = 1
ROTATE = 5
SCALE = 1.1

transforms = [
    transform.pad(2 * JITTER),
    transform.jitter(JITTER),
    transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
    transform.random_rotate(range(-ROTATE, ROTATE + 1))
]
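# Second pass: same objective, this time with pad/jitter/scale/rotate robustness transforms.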

imgs2 = render.render_vis(model,
                          objective,
                          transforms=transforms,
                          param_f=lambda: param.image(64),
                          thresholds=thresholds,
                          verbose=True)

fig([imgs, imgs2])

print('DONE')
Example #10
def neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes,
                  factorization_methods,
                  flag1,
                  flag_read_attr=False,
                  iter_num=100,
                  SG_path=False,
                  labels=None,
                  pos_flag=1,
                  thres_explained_var=0.7,
                  vis_random_seed=0,
                  image_size=0,
                  debug_flag=0):
    img = load(img_name)
    # img = load("./data/doghead224.jpeg")
    # img = load("./data/cathead224.jpeg")
    # img = resize(img, (224, 224, 3), order=1, mode='constant', anti_aliasing=False).astype(np.float32)

    for attr_class in attr_classes:
        root_directory = create_root_dir(img_name, attr_class, flag1)

        if flag1 == "Shap":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_shap(img, model, attr_class, layers,
                                flag1, flag_read_attr=flag_read_attr,
                                iter_num=iter_num, labels=labels, save_directory=root_directory)
        elif flag1 == "IGSG":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_igsg(img, model, attr_class, layers,
                              flag1, flag_read_attr=flag_read_attr,
                              iter_num=iter_num, SG_path=SG_path,
                              labels=labels, save_directory=root_directory)
        else:
            continue

        print_result_from_logit(logit_list, labels)
        for i_pos_neg in range(pos_flag):
            AM_list = AM_list_L[i_pos_neg]
            kept_channel_list = kept_channel_list_L[i_pos_neg]

            if debug_flag == 1:
                debug_show_AM_plus_img(AM_list, img, model)
            if i_pos_neg < 1:
                reverse_suffix = '_pos'
            else:
                reverse_suffix = '_neg'

            for layer_index, AM in enumerate(AM_list):
                layer = layers[layer_index]
                channel_attr = channel_attr_list[layer_index]
                kept_channel_index = kept_channel_list[layer_index]

                for factorization_method in factorization_methods:
                    spatial_factors, channel_factors, n_groups = \
                      decompose_AM_get_group_num(factorization_method, AM, thres_explained_var)

                    # if debug_flag == 1:
                    #   decompose_AM_with_UMAP(AM, n_groups)

                    channel_factors_max_index, channel_shap, short_index, long_index, \
                      n_groups = map_shap_attr_to_long(channel_factors, channel_attr, kept_channel_index)

                    # AM = np.squeeze(AM)
                    spatial_factors = weight_AM2spatial_factor(
                        AM, spatial_factors, n_groups, short_index,
                        kept_channel_index, channel_attr, i_pos_neg)

                    # If the attribution is negative, channel_shap should be multiplied by -1
                    if i_pos_neg > 0:
                        channel_shap = channel_shap * -1

                    # Sorting based on sum of Shapley values
                    ns_sorted = get_sort_groups_with_shap_scores(channel_shap)
                    every_group_attr_sorted, spatial_factors, channel_shap, n_groups =\
                      sort_groups_features(ns_sorted, spatial_factors, channel_shap, n_groups)

                    no_slash_layer_name = ''.join(layer.split('/'))
                    save_directory = create_factorization_dir(
                        root_directory, factorization_method,
                        no_slash_layer_name, reverse_suffix, n_groups)
                    gen_spatial_heat_maps(85, n_groups, spatial_factors,
                                          save_directory, attr_class,
                                          factorization_method,
                                          no_slash_layer_name, img, AM, model)
                    gen_info_txt(channel_shap, n_groups, save_directory,
                                 factorization_method, attr_class,
                                 every_group_attr_sorted)

                    # Use feature attributions times activation maps as the loss for updating the visualization images
                    channel_shap = channel_shap.astype("float32")
                    obj = sum(
                        utils.dot_attr_actmaps(layer, channel_shap[i], batch=i)
                        for i in range(n_groups))
                    '''
          For feature visualization, the "lucid" library is useful because
          it implements many loss functions from the literature, image processing operators,
          and bundles several pretrained TensorFlow networks.
          '''
                    transforms = [
                        transform.pad(12),
                        transform.jitter(8),
                        transform.random_scale(
                            [n / 100. for n in range(80, 120)]),
                        transform.random_rotate(
                            list(range(-10, 10)) + list(range(-5, 5)) +
                            10 * list(range(-2, 2))),
                        transform.jitter(2)
                    ]

                    # Image parameterization with shared low-resolution components across the
                    # batch, so the per-group optimized images stay visually aligned
                    def interpolate_f():
                        unique = fft_image(
                            (n_groups, image_size, image_size, 3),
                            random_seed=vis_random_seed)
                        shared = [
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 2, image_size // 2, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 4, image_size // 4, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed)
                        ]
                        return utils.to_valid_rgb(unique + sum(shared),
                                                  decorrelate=True)

                    group_icons = render.render_vis(
                        model,
                        objective_f=obj,
                        param_f=interpolate_f,
                        transforms=transforms,
                        verbose=False,
                        relu_gradient_override=False)[-1]
                    save_imgs(group_icons, save_directory, attr_class,
                              factorization_method, no_slash_layer_name)
                    print("Layer {} and class {} -> finished".format(
                        layer, attr_class))
Example #11
def test_render_VGG19():

    #with tf.Graph().as_default() as graph, tf.Session() as sess:
    K.set_learning_phase(0)
    with K.get_session().as_default():
        #images = tf.placeholder("float32", [None, 224, 224, 3], name="input")

        # <Code to construct & load your model inference graph goes here>
        model = tf.keras.applications.vgg19.VGG19(include_top=False,
                                                  weights='imagenet',
                                                  input_shape=(224, 224, 3))

        # Note: pre_relu nodes will still need to be added!

        os.makedirs('./model', exist_ok=True)
        #model.save('./model/keras_model.h5')
        frozen_graph = freeze_session(
            K.get_session(),
            output_names=[out.op.name for out in model.outputs])

        # Show current session graph with TensorBoard in Jupyter Notebook.
        #show_graph(tf.get_default_graph().as_graph_def())

        tf.io.write_graph(frozen_graph,
                          logdir="model",
                          name="tf_vgg19.pb",
                          as_text=False)

        nodes_tab = [
            n.name for n in tf.get_default_graph().as_graph_def().node
        ]

        # base_Model_instance = base_Model()
        # base_Model_instance.suggest_save_args()
    with tf.Graph().as_default() as graph, tf.Session() as sess:

        lucid_vgg = Lucid_VGGNet()
        lucid_vgg.load_graphdef()
        # for node in lucid_vgg.graph_def.node:
        #     if 'conv' in node.op:
        #         print(node.name)

        #Model.suggest_save_args()
        #lucid_model = Model.load_graphdef("tf_model.pb")

        LEARNING_RATE = 0.05  # default value

        optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/Conv2D:0",
                                      use_fixed_seed=0)
        # By default this seems to return a 128x128 crop of the generated 224x224 image
        plt.imshow(output_im[0][0])
        print(np.max(output_im), np.min(output_im))

        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/BiasAdd:1",
                                      use_fixed_seed=0)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/Relu:0",
                                      use_fixed_seed=True)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/Relu:1",
                                      use_fixed_seed=True)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/Relu:2",
                                      use_fixed_seed=True)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg,
                                      "block1_conv1/Relu:3",
                                      use_fixed_seed=True)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg, "block5_conv4/Relu:0")
        plt.imshow(output_im[0][0])

        # <IPython.core.display.HTML object> only displays in a Jupyter notebook
        param_f = lambda: param.image(128)
        output_im = render.render_vis(lucid_vgg,
                                      "block5_conv4/BiasAdd:100",
                                      param_f,
                                      thresholds=[256])
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg,
                                      "block5_conv4/Relu:100",
                                      param_f,
                                      thresholds=[256])
        plt.imshow(output_im[0][0])

        # Using alternate parameterizations is one of the primary ingredients for
        # effective visualization
        param_f = lambda: param.image(224, fft=True, decorrelate=True)
        output_im = render.render_vis(lucid_vgg, "block1_conv1/Relu:0",
                                      param_f)
        plt.imshow(output_im[0][0])
        output_im = render.render_vis(lucid_vgg, "block5_conv4/Relu:0",
                                      param_f)
        plt.imshow(output_im[0][0])

        JITTER = 1
        ROTATE = 5
        SCALE = 1.1

        transforms = [
            transform.pad(2 * JITTER),
            transform.jitter(JITTER),
            transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
            transform.random_rotate(range(-ROTATE, ROTATE + 1))
        ]

        image_full_tricks = render.render_vis(
            lucid_vgg,
            "block5_conv4/Relu:0",
            transforms=transforms,
            param_f=lambda: param.image(64, fft=True, decorrelate=True),
            optimizer=optimizer,
            thresholds=[2048],
            verbose=False)
        plt.imshow(image_full_tricks[0][0])
        image_full_tricks = render.render_vis(
            lucid_vgg,
            "block3_conv1/Relu:0",
            transforms=transforms,
            param_f=lambda: param.image(64, fft=True, decorrelate=True),
            optimizer=optimizer,
            thresholds=[2048],
            verbose=False)
        plt.imshow(image_full_tricks[0][0])
        image_full_tricks = render.render_vis(
            lucid_vgg,
            "block3_conv1/Relu:1",
            transforms=transforms,
            param_f=lambda: param.image(64, fft=False, decorrelate=False),
            optimizer=optimizer,
            thresholds=[2048],
            verbose=False)
        plt.imshow(image_full_tricks[0][0])
Example #12
def get_neuron_objective_stimuli(layer, channel):
    def get_crop_idxs(layer_str, channel):
        filtered_df = neuron_rf_sizes.loc[
            (neuron_rf_sizes["layer_name"] + "_" +
             neuron_rf_sizes["pre_post_relu"] == layer_str)
            & (neuron_rf_sizes["feature_map_number"] == channel)]

        return int(filtered_df["min_idx"].iloc[0]), int(
            filtered_df["max_idx"].iloc[0])

    img_size = 224
    padding_size = 16  # avoid edge artifacts as described in Feature Visualization blog
    param_f = lambda: param.image(img_size + 2 * padding_size,
                                  batch=number_images)
    objective_per_image = objectives.neuron(layer, channel)
    diversity_loss = -1e2 * objectives.diversity(layer)

    # transformations as described in Feature Visualization blog post
    kwargs = dict(
        thresholds=(2560, ),
        optimizer=tf.train.AdamOptimizer(learning_rate=0.05),
        transforms=[
            transform.jitter(16),
            transform.random_scale((1.0, 0.975, 1.025, 0.95, 1.05)),
            transform.random_rotate((-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)),
            transform.jitter(8),
        ],
    )
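    # Note: render.render_vis here appears to be a project-specific variant that accepts a
    # separate global (diversity) loss and returns losses alongside the rendered images.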

    # generate min stimuli
    _, min_stimuli, min_loss, loss_additional_global_list = render.render_vis(
        model,
        -objective_per_image,
        diversity_loss,
        param_f,
        use_fixed_seed=True,
        **kwargs,
    )
    # the optimization saves multiple states of the results
    # the last item is the final value
    min_stimuli = min_stimuli[-1]
    min_loss = min_loss[-1]
    min_loss_additional_global = loss_additional_global_list[-1]

    # undo/crop padding as described in Feature Visualization blog
    min_stimuli = min_stimuli[:, padding_size:-padding_size,
                              padding_size:-padding_size]

    # invert min_loss again
    min_loss = -min_loss

    # generate max stimuli
    _, max_stimuli, max_loss, loss_additional_global_list = render.render_vis(
        model,
        objective_per_image,
        diversity_loss,
        param_f,
        use_fixed_seed=True,
        **kwargs,
    )
    # see above
    max_stimuli = max_stimuli[-1]
    max_loss = max_loss[-1]
    max_loss_additional_global = loss_additional_global_list[-1]

    # undo/crop padding
    max_stimuli = max_stimuli[:, padding_size:-padding_size,
                              padding_size:-padding_size]

    # select crop for neuron objective
    min_idx, max_idx = get_crop_idxs(layer, channel)
    min_stimuli = min_stimuli[:, min_idx:max_idx + 1, min_idx:max_idx + 1]
    max_stimuli = max_stimuli[:, min_idx:max_idx + 1, min_idx:max_idx + 1]

    return (
        min_stimuli,
        min_loss,
        min_loss_additional_global,
        max_stimuli,
        max_loss,
        max_loss_additional_global,
    )
Example #13
def get_feature_block_that_maximizeGivenOutput(model_path, list_layer_index_to_print,
                                               input_name='block1_conv1_input', sizeIm=224,
                                               DECORRELATE=True, ROBUSTNESS=True,
                                               dico=None, image_shape=None):

    if not os.path.isfile(model_path):
        raise ValueError(model_path + ' does not exist!')

    assert image_shape is not None
    lucid_net = Lucid_GenericFeatureMaps(model_path=model_path,
                                         image_shape=image_shape,
                                         input_name=input_name)
    lucid_net.load_graphdef()
    nodes_tab = [n.name for n in tf.get_default_graph().as_graph_def().node]
    assert (input_name in nodes_tab)
    #print(nodes_tab)

    # `fft` parameter controls spatial decorrelation
    # `decorrelate` parameter controls channel decorrelation
    print('image_shape[2],image_shape[0], h=image_shape[1]')
    print(image_shape[2], image_shape[0], image_shape[1])
    param_f = lambda: feature_block_var(image_shape[2],
                                        image_shape[0],
                                        h=image_shape[1],
                                        batch=1,
                                        sd=None,
                                        fft=DECORRELATE)
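    # feature_block_var presumably parameterizes an intermediate feature block of shape
    # (channels, width, height) rather than an RGB image.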

    #print(feature_block(image_shape[2],image_shape[0], h=image_shape[1], batch=1, sd=None, fft=DECORRELATE))

    #    if DECORRELATE:
    #        ext='_Deco'
    #    else:
    #        ext=''

    if ROBUSTNESS:
        JITTER = 1
        #ROTATE = 1
        SCALE = 1.1

        transforms = [
            transform.pad(2 * JITTER),
            transform.jitter(JITTER),
            transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
            #transform.random_rotate(range(-ROTATE, ROTATE+1))
        ]
    else:
        transforms = []

    verbose = False
    # You need to provide a dict mapping each layer name to an op node!
    assert dico is not None

    LEARNING_RATE = 0.0005  # default value
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    output_im_list = []
    for layer_index_to_print in list_layer_index_to_print:
        layer, i = layer_index_to_print

        layer_str = dico[layer]
        obj = layer + '/' + layer_str + ':' + str(i)
        print('obj', obj)

        #        output_im = render.render_vis(lucid_net,obj ,
        #                                      transforms=transforms,
        #                                      thresholds=[0],
        #                                      param_f=param_f,
        #                                      optimizer=optimizer,
        #                                      use_fixed_seed=True,
        #                                      verbose=verbose)
        output_im = lbfgs_min(lucid_net,
                              obj,
                              transforms=transforms,
                              thresholds=[2048],
                              param_f=param_f,
                              optimizer=optimizer,
                              use_fixed_seed=True,
                              verbose=verbose)
        output_im_list += [output_im]

    return (output_im_list)
Example #14
def test_render_ResNet50():

    tf.reset_default_graph()
    K.set_learning_phase(0)
    if not (os.path.isfile("model/tf_resnet50.pb")):
        with K.get_session().as_default():
            model = tf.keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet',\
                                                          input_shape= (224, 224, 3))
            print(model.input)
            os.makedirs('./model', exist_ok=True)

            frozen_graph = freeze_session(
                K.get_session(),
                output_names=[out.op.name for out in model.outputs],
                clear_devices=True)
            # Save the pb model
            tf.io.write_graph(frozen_graph,
                              logdir="model",
                              name="tf_resnet50.pb",
                              as_text=False)
            nodes_tab = [
                n.name for n in tf.get_default_graph().as_graph_def().node
            ]
            print(nodes_tab)

    lucid_resnet50 = Lucid_ResNet()
    lucid_resnet50.load_graphdef()


    out = render.render_vis(lucid_resnet50, 'conv4_block6_2_conv/Conv2D:0',\
                            relu_gradient_override=True,use_fixed_seed=True)
    plt.figure()
    plt.imshow(out[0][0])


    out = render.render_vis(lucid_resnet50, 'conv2_block1_2_conv/Conv2D:32',\
                            relu_gradient_override=True,use_fixed_seed=True)
    plt.figure()
    plt.imshow(out[0][0])

    JITTER = 1
    ROTATE = 5
    SCALE = 1.1

    transforms = [
        transform.pad(2 * JITTER),
        transform.jitter(JITTER),
        transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
        transform.random_rotate(range(-ROTATE, ROTATE + 1))
    ]

    imgs = render.render_vis(lucid_resnet50,
                             'conv4_block4_2_conv/Conv2D:0',
                             transforms=transforms,
                             param_f=lambda: param.image(64),
                             thresholds=[2048],
                             verbose=False,
                             relu_gradient_override=True,
                             use_fixed_seed=True)
    plt.figure()
    plt.imshow(imgs[0][0])

    input('wait')
    plt.close()
Example #15
def test_autocorr_render_Inception_v1():

    tf.reset_default_graph()
    K.set_learning_phase(0)
    if not (os.path.isfile("model/tf_inception_v1.pb")):
        with K.get_session().as_default():
            model = inception_v1_oldTF(
                weights='imagenet',
                include_top=True)  #include_top=True, weights='imagenet')
            print(model.input)
            os.makedirs('./model', exist_ok=True)

            frozen_graph = freeze_session(
                K.get_session(),
                output_names=[out.op.name for out in model.outputs],
                clear_devices=True)
            # Save the pb model
            tf.io.write_graph(frozen_graph,
                              logdir="model",
                              name="tf_inception_v1.pb",
                              as_text=False)
            nodes_tab = [
                n.name for n in tf.get_default_graph().as_graph_def().node
            ]
            print(nodes_tab)

    #with tf.Graph().as_default() as graph, tf.Session() as sess:
    with gradient_override_map({
            'Relu': redirected_relu_grad,
            'ReLU': redirected_relu_grad
    }):
        lucid_inception_v1 = Lucid_InceptionV1()
        lucid_inception_v1.load_graphdef()


    out = render.render_vis(lucid_inception_v1, 'mixed4a_1x1_pre_relu/Conv2D:0',\
                            relu_gradient_override=True,use_fixed_seed=True)
    plt.figure()
    plt.imshow(out[0][0])


    out = render.render_vis(lucid_inception_v1, 'mixed4b_pre_relu/concat:452',\
                            relu_gradient_override=True,use_fixed_seed=True)
    plt.figure()
    plt.imshow(out[0][0])

    JITTER = 1
    ROTATE = 5
    SCALE = 1.1

    transforms = [
        transform.pad(2 * JITTER),
        transform.jitter(JITTER),
        transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
        transform.random_rotate(range(-ROTATE, ROTATE + 1))
    ]

    imgs = render.render_vis(lucid_inception_v1,
                             'mixed4b_pre_relu/concat:452',
                             transforms=transforms,
                             param_f=lambda: param.image(64),
                             thresholds=[2048],
                             verbose=False,
                             relu_gradient_override=True,
                             use_fixed_seed=True)
    plt.figure()
    plt.imshow(imgs[0][0])
Example #16
def test_render_Inception_v1_slim():

    K.set_learning_phase(0)
    model_path = 'model/tf_inception_v1_slim.pb'
    if not (os.path.exists(model_path)):
        with K.get_session().as_default():
            model = InceptionV1_slim(include_top=True, weights='imagenet')
            os.makedirs('./model', exist_ok=True)

            #model.save('./model/inception_v1_keras_model.h5')
            frozen_graph = freeze_session(
                K.get_session(),
                output_names=[out.op.name for out in model.outputs],
                clear_devices=True)
            # Save the pb model
            tf.io.write_graph(frozen_graph,
                              logdir="model",
                              name="tf_inception_v1_slim.pb",
                              as_text=False)

    with tf.Graph().as_default() as graph, tf.Session() as sess:

        # f = gfile.FastGFile("/model/tf_inception_v1.pb", 'rb')
        # graph_def = tf.GraphDef()
        # # Parses a serialized binary message into the current message.
        # graph_def.ParseFromString(f.read())
        # f.close()

        # sess.graph.as_default()
        # # Import a serialized TensorFlow `GraphDef` protocol buffer
        # # and place into the current default `Graph`.
        # tf.import_graph_def(graph_def)

        # nodes_tab = [n.name for n in tf.get_default_graph().as_graph_def().node]
        #print(nodes_tab)
        with gradient_override_map({
                'Relu': redirected_relu_grad,
                'ReLU': redirected_relu_grad
        }):
            # this does not seem to make any particular difference
            lucid_inception_v1 = Lucid_Inception_v1_slim()
            lucid_inception_v1.load_graphdef()

        neuron1 = ('mixed4b_pre_relu', 111)  # large fluffy
        C = lambda neuron: objectives.channel(*neuron)
        out = render.render_vis(lucid_inception_v1, 'Mixed_4b_Concatenated/concat:452',\
                                relu_gradient_override=True,use_fixed_seed=True)
        plt.imshow(out[0][0])

        JITTER = 1
        ROTATE = 5
        SCALE = 1.1

        transforms = [
            transform.pad(2 * JITTER),
            transform.jitter(JITTER),
            transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
            transform.random_rotate(range(-ROTATE, ROTATE + 1))
        ]
        # https://github.com/tensorflow/lucid/issues/82
        with gradient_override_map({
                'Relu': redirected_relu_grad,
                'ReLU': redirected_relu_grad
        }):
            out = render.render_vis(lucid_inception_v1, "Mixed_4b_Concatenated/concat:452", transforms=transforms,
                                     param_f=lambda: param.image(64),
                                     thresholds=[2048], verbose=False,\
                                     relu_gradient_override=True,use_fixed_seed=True)
        # out = render.render_vis(lucid_inception_v1, "Mixed_4d_Branch_2_b_3x3_act/Relu:452", transforms=transforms,
        #                          param_f=lambda: param.image(64),
        #                          thresholds=[2048], verbose=False) # This does not work!
        plt.imshow(out[0][0])

        out = render.render_vis(lucid_inception_v1, "Mixed_3c_Concatenated/concat:479", transforms=transforms,
                                 param_f=lambda: param.image(64),
                                 thresholds=[2048], verbose=False,\
                                 relu_gradient_override=True,use_fixed_seed=True)
        plt.imshow(out[0][0])
Example #17
def get_channel_objective_stimuli(layer, channel):
    img_size = 224

    padding_size = 16
    param_f = lambda: param.image(img_size + 2 * padding_size,
                                  batch=number_images)
    objective_per_image = objectives.channel(layer, channel)
    diversity_loss = -1e2 * objectives.diversity(layer)

    # transformations as described in Feature Visualization blog post
    kwargs = dict(
        thresholds=(2560, ),
        optimizer=tf.train.AdamOptimizer(learning_rate=0.05),
        transforms=[
            transform.jitter(16),
            transform.random_scale((1.0, 0.975, 1.025, 0.95, 1.05)),
            transform.random_rotate((-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)),
            transform.jitter(8),
        ],
    )

    # generate min stimuli
    _, min_stimuli, min_loss, loss_additional_global_list = render.render_vis(
        model,
        -objective_per_image,
        diversity_loss,
        param_f,
        use_fixed_seed=True,
        **kwargs,
    )
    # the optimization saves multiple states of the results
    # the last item is the final value
    min_stimuli = min_stimuli[-1]
    min_loss = min_loss[-1]
    min_loss_additional_global = loss_additional_global_list[-1]

    # undo/crop padding
    min_stimuli = min_stimuli[:, padding_size:-padding_size,
                              padding_size:-padding_size]

    # invert loss again
    min_loss = -min_loss

    # generate max stimuli
    _, max_stimuli, max_loss, loss_additional_global_list = render.render_vis(
        model,
        objective_per_image,
        diversity_loss,
        param_f,
        use_fixed_seed=True,
        **kwargs,
    )
    # see above
    max_stimuli = max_stimuli[-1]
    max_loss = max_loss[-1]
    max_loss_additional_global = loss_additional_global_list[-1]

    # undo/crop padding
    max_stimuli = max_stimuli[:, padding_size:-padding_size,
                              padding_size:-padding_size]

    return (
        min_stimuli,
        min_loss,
        min_loss_additional_global,
        max_stimuli,
        max_loss,
        max_loss_additional_global,
    )
Example #18
def render_facet(model,
                 neuron_obj,
                 layers,
                 style_attrs,
                 strength=(0.1, 0.3),
                 l2_weight=10.0,
                 resolution=128,
                 alpha=False):
    def mean_alpha():
        def inner(T):
            input_t = T("input")
            return tf.sqrt(tf.reduce_mean(input_t[..., 3:]**2))

        return objectives.Objective(inner)

    standard_transforms = [
        transform.pad(2, mode='constant', constant_value=.5),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.jitter(4),
        transform.random_scale([0.995**n for n in range(-5, 80)] +
                               [0.998**n for n in 2 * list(range(20, 40))]),
        transform.random_rotate(
            list(range(-20, 20)) + list(range(-10, 10)) + list(range(-5, 5)) +
            5 * [0]),
        transform.jitter(2),
        transform.crop_or_pad_to(resolution, resolution)
    ]

    if alpha:
        standard_transforms.append(transform.collapse_alpha_random())
        param_f = lambda: param.image(resolution, batch=9, alpha=True)
    else:
        param_f = lambda: param.image(resolution, batch=9)

    optimizer = tf.train.AdamOptimizer(0.02)
    ultimate_layer = [
        n.name for n in model.graph_def.node if "image_block_4" in n.name
    ][-1]
    obj = vector(ultimate_layer, neuron_obj)
    facetsp = [(5 / len(layers)) * attr(obj, style, [layer], strength)
               for style, layer in list(zip(style_attrs, layers))]
    for facetp in facetsp:
        obj = obj + facetp
    obj = obj + l2_weight * l2()
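    # obj now combines the direction term for the final image block, the per-layer
    # style/attribution terms, and an l2() term weighted by l2_weight.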
    if alpha:
        obj -= mean_alpha()
        obj -= 1e2 * objectives.blur_alpha_each_step()
    data = render.render_vis(model,
                             obj,
                             param_f,
                             transforms=standard_transforms,
                             optimizer=optimizer,
                             thresholds=(1024 * 4, ))
    return data
Example #19
def render_icons(
    directions,
    model,
    layer,
    size=80,
    n_steps=128,
    verbose=False,
    S=None,
    num_attempts=3,
    cossim=True,
    alpha=False,
):

    model.load_graphdef()

    image_attempts = []
    loss_attempts = []

    depth = 4 if alpha else 3
    batch = len(directions)
    input_shape = (batch, size, size, depth)

    # Render several attempts and keep, per direction, the one with the best objective score.
    for attempt in range(num_attempts):

        # Render an image for each activation vector
        param_f = lambda: param.image(size,
                                      batch=len(directions),
                                      fft=True,
                                      decorrelate=True,
                                      alpha=alpha)

        if cossim is True:
            obj_list = [
                direction_neuron_cossim_S(layer, v, batch=n, S=S)
                for n, v in enumerate(directions)
            ]
        else:
            obj_list = [
                direction_neuron_S(layer, v, batch=n, S=S)
                for n, v in enumerate(directions)
            ]

        obj_list += [objectives.penalize_boundary_complexity(input_shape, w=5)]

        obj = objectives.Objective.sum(obj_list)

        # holy mother of transforms
        transforms = [
            transform.pad(16, mode='constant'),
            transform.jitter(4),
            transform.jitter(4),
            transform.jitter(8),
            transform.jitter(8),
            transform.jitter(8),
            transform.random_scale(0.998**n for n in range(20, 40)),
            transform.random_rotate(
                chain(range(-20, 20), range(-10, 10), range(-5, 5), 5 * [0])),
            transform.jitter(2),
            transform.crop_or_pad_to(size, size)
        ]
        if alpha:
            transforms.append(transform.collapse_alpha_random())

        # This is the tensorflow optimization process

        # print("attempt: ", attempt)
        with tf.Graph().as_default(), tf.Session() as sess:
            learning_rate = 0.05
            losses = []
            trainer = tf.train.AdamOptimizer(learning_rate)
            T = render.make_vis_T(model, obj, param_f, trainer, transforms)
            vis_op, t_image = T("vis_op"), T("input")
            losses_ = [obj_part(T) for obj_part in obj_list]
            tf.global_variables_initializer().run()
            for i in range(n_steps):
                loss, _ = sess.run([losses_, vis_op])
                losses.append(loss)
                # if i % 100 == 0:
                # print(i)

            img = t_image.eval()
            img_rgb = img[:, :, :, :3]
            if alpha:
                # print("alpha true")
                k = 0.8
                bg_color = 0.0
                img_a = img[:, :, :, 3:]
                img_merged = img_rgb * (
                    (1 - k) + k * img_a) + bg_color * k * (1 - img_a)
                image_attempts.append(img_merged)
            else:
                # print("alpha false")
                image_attempts.append(img_rgb)

            loss_attempts.append(losses[-1])

    # Keep, for each direction, the icon from the best-scoring attempt
    loss_attempts = np.asarray(loss_attempts)
    loss_final = []
    image_final = []
    # print("merging best scores from attempts...")
    for i, d in enumerate(directions):
        # note: argmax, because higher objective values are better (this is not a traditional loss)
        mi = np.argmax(loss_attempts[:, i])
        image_final.append(image_attempts[mi][i])
        loss_final.append(loss_attempts[mi, i])

    return (image_final, loss_final)
Example #20
    def run(self,
            layer,
            class_,
            channel=None,
            style_template=None,
            transforms=False,
            opt_steps=500,
            gram_coeff=1e-14):
        """
    layer         : layer_name to visualize
    class_        : class to consider
    style_template: template for comparison of the generated activation maximization map
    transforms    : transforms required
    opt_steps     : number of optimization steps
    """

        self.layer = layer
        self.channel = channel if channel is not None else 0

        with tf.Graph().as_default() as graph, tf.Session() as sess:

            if style_template is not None:

                try:
                    gram_template = tf.constant(
                        np.load(style_template),  #[1:-1,:,:],
                        dtype=tf.float32)
                except Exception:
                    image = cv2.imread(style_template)
                    print(image.shape)
                    gram_template = tf.constant(
                        np.pad(cv2.imread(style_template),
                               ((1, 1), (0, 0))),  #[1:-1,:,:],
                        dtype=tf.float32)
            else:
                gram_template = None

            obj = self._channel(self.layer + "/convolution",
                                self.channel,
                                gram=gram_template,
                                gram_coeff=gram_coeff)
            obj += -self.L1 * objectives.L1(constant=.5)
            obj += -self.TV * objectives.total_variation()
            #obj += self.blur * objectives.blur_input_each_step()
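            # obj now combines the channel activation (optionally matched against the gram
            # template) with L1 and total-variation regularizers.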

            if transforms is True:
                transforms = [
                    transform.pad(self.jitter),
                    transform.jitter(self.jitter),
                    #transform.random_scale([self.scale ** (n/10.) for n in range(-10, 11)]),
                    #transform.random_rotate(range(-self.rotate, self.rotate + 1))
                ]
            else:
                transforms = []

            T = render.make_vis_T(
                self.model,
                obj,
                param_f=lambda: self.image(240,
                                           channels=self.n_channels,
                                           fft=self.decorrelate,
                                           decorrelate=self.decorrelate),
                optimizer=None,
                transforms=transforms,
                relu_gradient_override=False)
            tf.global_variables_initializer().run()

            images_array = []

            for i in range(opt_steps):
                T("vis_op").run()
                images_array.append(
                    T("input").eval()[:, :, :, -1].reshape((240, 240)))

            plt.figure(figsize=(10, 10))
            # for i in range(1, self.n_channels+1):
            #   plt.imshow(np.load(style_template)[:, :, i-1], cmap='gray',
            #              interpolation='bilinear', vmin=0., vmax=1.)
            #   plt.savefig('gram_template_{}.png'.format(i), bbox_inches='tight')

            texture_images = []

            for i in range(1, self.n_channels + 1):
                # plt.subplot(1, self.n_channels, i)
                image = T("input").eval()[:, :, :, i - 1].reshape((240, 240))
                print("channel: ", i, image.min(), image.max())
                # plt.imshow(image, cmap='gray',
                #            interpolation='bilinear', vmin=0., vmax=1.)
                # plt.xticks([])
                # plt.yticks([])
                texture_images.append(image)
                # show(np.hstack(T("input").eval()))

                os.makedirs(os.path.join(self.savepath, class_), exist_ok=True)
                # print(self.savepath, class_, self.layer+'_' + str(self.channel) +'.png')
                # plt.savefig(os.path.join(self.savepath, class_, self.layer+'_' + str(self.channel) + '_' + str(i) +'_noreg.png'), bbox_inches='tight')
            # plt.show()
            # print(np.array(texture_images).shape)

        return np.array(texture_images), images_array