def main(unused_args):
    model = vision_models.ResnetV1_50_slim()
    model.load_graphdef()
    print('------------------------------')
    print('Loaded Parameters')
    print('------------------------------')
    print('Chosen Class: {}'.format(FLAGS.chosen_class))
    print('Hue         : {}'.format(FLAGS.hue_hex))
    print('No. of Steps: {}'.format(FLAGS.steps))
    print('Output Size : {}'.format(FLAGS.output_size))
    print('Output Path : {}'.format(FLAGS.output_path))
    print('------------------------------')
    render_vis(
        model=model,
        objective_f=objectives.channel(
            'resnet_v1_50/SpatialSqueeze',
            FLAGS.chosen_class,
        ),
        param_f=lambda: cppn(),
        optimizer=tf.train.AdamOptimizer(5e-3),
        transforms=[],
        steps=FLAGS.steps,
        output_size=FLAGS.output_size,
        output_path=FLAGS.output_path,
    )
    classify(FLAGS.output_path)
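Every snippet in this listing follows the same core pattern: build a channel objective, optionally pick a parameterization and transforms, then hand everything to a renderer. A minimal self-contained sketch, assuming a standard lucid (TF1) install; the layer name and channel index are illustrative:

import lucid.modelzoo.vision_models as models
import lucid.optvis.objectives as objectives
import lucid.optvis.render as render

model = models.InceptionV1()
model.load_graphdef()

# Maximize the mean activation of one channel of a chosen layer.
obj = objectives.channel("mixed4a_pre_relu", 476)
images = render.render_vis(model, obj, thresholds=(512,))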
def visualize_neuron(algo='apex',
                     env='SeaquestNoFrameskip-v4',
                     run_id=1,
                     tag="final",
                     param_f=lambda: image([1, 84, 84, 4]),
                     do_render=False,
                     transforms=[
                         transform.jitter(3),
                     ],
                     layer_no=0,
                     neuron=0,
                     regularization=0,
                     **params):
    tf.reset_default_graph()

    m = MakeAtariModel(algo, env, run_id, tag, local=False)()
    m.load_graphdef()

    if m.layers[layer_no]['type'] == 'dense':
        obj = objectives.channel(m.layers[layer_no]['name'], neuron)
    else:
        obj = channel(m.layers[layer_no]['name'],
                      neuron,
                      ordering=m.channel_order)

    out = optimize_input(obj + regularization,
                         m,
                         param_f,
                         transforms,
                         do_render=do_render,
                         **params)
    return out
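The obj + regularization argument above works because lucid objectives support ordinary arithmetic. A hedged sketch of that algebra; the total-variation penalty and its 1e-3 weight are illustrative choices, not taken from the snippet:

import lucid.optvis.objectives as objectives

obj = objectives.channel("mixed4a_pre_relu", 42)
# Objective instances compose with +, - and scalar *, so penalty terms can
# be mixed directly into the target; subtracting total variation smooths
# the result, while adding a little (as in a later example here) sharpens it.
obj_regularized = obj - 1e-3 * objectives.total_variation()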
Example #3
    def __call__(self, layer_name, channel_index):
        obj = objectives.channel(layer_name, channel_index)
        image = render.render_vis(self.model,
                                  obj,
                                  param_f=self.param_f,
                                  thresholds=[self.threshold],
                                  use_fixed_seed=True,
                                  verbose=False)
        return np.array(image[0][0])
Example #4
    def start(self):
        self.image = None
        self._doRun(True)

        obj = objectives.channel(self.layer_id, self.unit)
        self.image = render.render_vis(self.model, obj)
        #self.image = render.render_vis(self.model, self.unit_id)

        self._doRun(False)
def render_feature(cppn_f=lambda: image_cppn(84),
                   optimizer=tf.train.AdamOptimizer(0.001),
                   objective=objectives.channel('noname', 0),
                   transforms=[]):
    vis = render.render_vis(m,
                            objective,
                            param_f=cppn_f,
                            optimizer=optimizer,
                            transforms=transforms,
                            thresholds=[2**i for i in range(5, 10)],
                            verbose=False)
    #show(vis)
    return vis
Example #6
def get_lucid_orig_image_losses(args):
    #import lucid.modelzoo.vision_models as models
    #model = models.InceptionV1()
    #model.load_graphdef()
    #param_f = lambda: param.image(128, batch=4)
    #obj = objectives.channel("mixed5a", 9) - 1e2*objectives.diversity("mixed5a")
    #obj = objectives.channel("mixed5a", 9)
    #all_images = render.render_vis(model, obj, param_f)
    #image = all_images[0]

    global USE_ORG_IMPORT_MODEL
    USE_ORG_IMPORT_MODEL = True
    import lucid.modelzoo.vision_models as models
    model = models.InceptionV1()
    model.load_graphdef()
    layer = "mixed5a"
    num_units = 16
    param_f = lambda: param.image(224, batch=num_units)
    obj = objectives.channel(layer, 0, 0)
    for idx in range(1, num_units):
        obj += objectives.channel(layer, idx, idx)
    image, all_losses = render_vis(model, obj, param_f)
    return layer, image, all_losses
Example #7
    def start_multi(self):
        self.image = None
        self._doRun(True)

        logger.info("!!! running all:")
        for unit in range(self.layer_units):
            self.unit = unit
            self.notify_observers(EngineChange(unit_changed=True))
            logger.info(f"!!! running unit {unit}")
            obj = objectives.channel(self.layer_id, unit)
            self.image = render.render_vis(self.model, obj)
            if not self.running:
                break
            self._doRun(True)

        self._doRun(False)
Example #8
def render_set(n, channel):

    print("Starting", channel, n)
    obj = objectives.channel(channel, n)

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])
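    # The size placeholder defaults to the training resolution (size_n) and is
    # overridden with image_size below to render the final image larger.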

    f_model = os.path.join(save_model_dest, channel + f"_{n}.npy")

    T = render.make_vis_T(
        model,
        obj,
        param_f=lambda: cppn(t_size),
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    if not os.path.exists(f_model):

        for i in tqdm(range(training_steps)):
            _, loss = sess.run([
                T("vis_op"),
                T("loss"),
            ])

        # Save trained variables
        params = np.array(sess.run(train_vars), object)
        save(params, f_model)
    else:
        params = load(f_model)

    # Save final image
    feed_dict = dict(zip(train_vars, params))
    feed_dict[t_size] = image_size
    images = T("input").eval(feed_dict)
    img = images[0]
    sess.close()

    f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
    imageio.imwrite(f_image, img)
    print(f"Saved to {f_image}")
Example #9
def render_set(n, channel, train_n):

    # Creates independent images
    param_f = lambda: create_network(batch_size)

    obj = sum(
        objectives.channel(channel, n, batch=i)
        for i in range(batch_size)
    )
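    # One channel objective per batch index: each image in the batch is
    # optimized independently toward the same unit.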

    # This gives some visual similarity to the models
    #obj += 10*objectives.input_diff(target_img)

    # This does as well but not as nice
    #obj += 0.01*objectives.alignment(channel, decay_ratio=3)

    # See more here
    # https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/differentiable-parameterizations/aligned_interpolation.ipynb#scrollTo=jOCYDhRrnPjp

    T = render.make_vis_T(
        model, obj,
        param_f=param_f,
        transforms=[],
        optimizer=optimizer, 
    )
    
    saver = tf.train.Saver()
    tf.global_variables_initializer().run()

    for i in tqdm(range(train_n)):
        _, loss = sess.run([T("vis_op"), T("loss")])

    # Save trained variables
    f_model = os.path.join(save_model_dest, channel + f"_{n}_batches_{batch_size}.ckpt")
    save_path = saver.save(sess, f_model)
      
    # Return image
    images = T("input").eval({t_size: 600})
    return images
Example #10
def test_integration_any_channels():
    inceptionv1 = InceptionV1()
    objectives_f = [
        objectives.deepdream("mixed4a_pre_relu"),
        objectives.channel("mixed4a_pre_relu", 360),
        objectives.neuron("mixed3a", 177)
    ]
    params_f = [
        lambda: param.grayscale_image_rgb(128),
        lambda: arbitrary_channels_to_rgb(128, channels=10)
    ]
    for objective_f in objectives_f:
        for param_f in params_f:
            rendering = render.render_vis(
                inceptionv1,
                objective_f,
                param_f,
                verbose=False,
                thresholds=(0, 64),
                use_fixed_seed=True,
            )
            start_image, end_image = rendering

            assert (start_image != end_image).any()
Example #11
print('loading model')

model = models.InceptionV1()
model.load_graphdef()
print('calculating')
neuron = ("mixed4a_pre_relu", 476)
version = 7
size = 64  # resulting image cube has dimensions size x size x size x 3


def param_f3d(size):
    temp = imageCube(size)
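    # Concatenating the cube with two transposed copies makes the optimizer
    # see all three orthogonal viewing axes of a single parameterization.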
    return tf.concat([
        temp,
        tf.transpose(temp, [1, 0, 2, 3]),
        tf.transpose(temp, [2, 1, 0, 3])
    ], 0)


objective = objectives.channel(*neuron)
image_cube = render.render_vis(
    model,
    objective,
    lambda: param_f3d(size),
    transforms=transform.standard_transforms,
    thresholds=(512, ))  # number of optimization steps; I used 4096

image_cube = np.array(image_cube)[:, :size]  # keep only the first size images (the untransposed view)
np.save(f"featureCube{size}_{version}.npy", image_cube)
Example #12
    # Save trained variables
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    params = np.array(sess.run(train_vars), object)

    save(params, f_model)

    # Save final image
    images = T("input").eval({t_size: 600})
    img = images[0]
    sess.close()

    imsave(f_image, img)


C = [objectives.channel(channel, cn) for cn in CN]

#C0 = objectives.channel(channel, cn0)
#C1 = objectives.channel(channel, cn1)

# Render the fixed points

for kn, obj in enumerate(C):
    render_set(channel, starting_training_steps, f'A{kn}', objective=obj)

#render_set(channel, starting_training_steps, 'A0', objective=C0)
#render_set(channel, starting_training_steps, 'A1', objective=C1)

MODELS = [
    load(os.path.join(save_model_dest, channel + f"_A{kn}.npy"))
    for kn in range(len(C))
]
Example #13
    def _get_objective(self, layer, index=0):
        return objectives.channel(layer, index)
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
#import lucid.optvis.optimizer as transfor
import tensorflow as tf
import matplotlib.pyplot as plt

model = models.InceptionV1()
model.load_graphdef()

neuron1 = ('mixed4b_pre_relu', 452)
C = lambda neuron: objectives.channel(*neuron)

out = render.render_vis(model, C(neuron1))
plt.imshow(out[0][0])

JITTER = 1
ROTATE = 5
SCALE = 1.1

transforms = [
    transform.pad(2 * JITTER),
    transform.jitter(JITTER),
    transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
    transform.random_rotate(range(-ROTATE, ROTATE + 1))
]
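These transforms plug straight into render_vis. A minimal usage sketch reusing model, C, and neuron1 from above; the 128-px parameterization and 512-step threshold are illustrative:

out = render.render_vis(model, C(neuron1),
                        transforms=transforms,
                        param_f=lambda: param.image(128),
                        thresholds=(512,))
plt.imshow(out[0][0])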
Example #15
def test_render_Inception_v1_slim():

    K.set_learning_phase(0)
    model_path = 'model/tf_inception_v1_slim.pb'
    if not os.path.exists(model_path):
        with K.get_session().as_default():
            model = InceptionV1_slim(include_top=True, weights='imagenet')
            os.makedirs('./model', exist_ok=True)

            #model.save('./model/inception_v1_keras_model.h5')
            frozen_graph = freeze_session(
                K.get_session(),
                output_names=[out.op.name for out in model.outputs],
                clear_devices=True)
            # Save the pb model
            tf.io.write_graph(frozen_graph,
                              logdir="model",
                              name="tf_inception_v1_slim.pb",
                              as_text=False)

    with tf.Graph().as_default() as graph, tf.Session() as sess:

        # f = gfile.FastGFile("/model/tf_inception_v1.pb", 'rb')
        # graph_def = tf.GraphDef()
        # # Parses a serialized binary message into the current message.
        # graph_def.ParseFromString(f.read())
        # f.close()

        # sess.graph.as_default()
        # # Import a serialized TensorFlow `GraphDef` protocol buffer
        # # and place into the current default `Graph`.
        # tf.import_graph_def(graph_def)

        # nodes_tab = [n.name for n in tf.get_default_graph().as_graph_def().node]
        #print(nodes_tab)
        with gradient_override_map({
                'Relu': redirected_relu_grad,
                'ReLU': redirected_relu_grad
        }):
            # this does not seem to bring anything special
            lucid_inception_v1 = Lucid_Inception_v1_slim()
            lucid_inception_v1.load_graphdef()

        neuron1 = ('mixed4b_pre_relu', 111)  # large fluffy
        C = lambda neuron: objectives.channel(*neuron)
        out = render.render_vis(lucid_inception_v1, 'Mixed_4b_Concatenated/concat:452',\
                                relu_gradient_override=True,use_fixed_seed=True)
        plt.imshow(out[0][0])

        JITTER = 1
        ROTATE = 5
        SCALE = 1.1

        transforms = [
            transform.pad(2 * JITTER),
            transform.jitter(JITTER),
            transform.random_scale([SCALE**(n / 10.) for n in range(-10, 11)]),
            transform.random_rotate(range(-ROTATE, ROTATE + 1))
        ]
        # https://github.com/tensorflow/lucid/issues/82
        with gradient_override_map({
                'Relu': redirected_relu_grad,
                'ReLU': redirected_relu_grad
        }):
            out = render.render_vis(lucid_inception_v1, "Mixed_4b_Concatenated/concat:452", transforms=transforms,
                                     param_f=lambda: param.image(64),
                                     thresholds=[2048], verbose=False,\
                                     relu_gradient_override=True,use_fixed_seed=True)
        # out = render.render_vis(lucid_inception_v1, "Mixed_4d_Branch_2_b_3x3_act/Relu:452", transforms=transforms,
        #                          param_f=lambda: param.image(64),
        #                          thresholds=[2048], verbose=False) # This does not work!
        plt.imshow(out[0][0])

        out = render.render_vis(lucid_inception_v1, "Mixed_3c_Concatenated/concat:479", transforms=transforms,
                                 param_f=lambda: param.image(64),
                                 thresholds=[2048], verbose=False,\
                                 relu_gradient_override=True,use_fixed_seed=True)
        plt.imshow(out[0][0])
Example #16
def print_PCA_images(model_path,layer_to_print,weights,index_features_withinLayer,\
                     path_output='',prexif_name='',\
                     input_name='block1_conv1_input',Net='VGG',sizeIm=256,\
                     DECORRELATE=True,ROBUSTNESS=True,\
                     inverseAndSave=True,cossim=False,dot_vector = False,
                     num_features=None):
    #    ,printOnlyRGB=True

    if not os.path.isfile(model_path):
        raise ValueError(model_path + ' does not exist!')

    if Net == 'VGG':
        lucid_net = Lucid_VGGNet(model_path=model_path, input_name=input_name)
    elif Net == 'InceptionV1':
        lucid_net = Lucid_InceptionV1(model_path=model_path,
                                      input_name=input_name)
    elif Net == 'InceptionV1_slim':
        lucid_net = Lucid_Inception_v1_slim(model_path=model_path,
                                            input_name=input_name)
    elif Net == 'ResNet':
        lucid_net = Lucid_ResNet(model_path=model_path, input_name=input_name)
    else:
        raise ValueError(Net + ' is unknown')
    lucid_net.load_graphdef()
    #nodes_tab = [n.name for n in tf.compat.v1.get_default_graph().as_graph_def().node]

    obj_str, kind_layer = get_obj_and_kind_layer(layer_to_print, Net)

    param_f = lambda: param.image(
        sizeIm, fft=DECORRELATE, decorrelate=DECORRELATE)

    if DECORRELATE:
        ext = '_Deco'
    else:
        ext = ''

    if ROBUSTNESS:
        transforms = transform.standard_transforms
        ext += ''
    else:
        transforms = []
        ext += '_noRob'


#    LEARNING_RATE = 0.005 # default value
#    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

    if Net == 'VGG':
        name_base = layer_to_print + kind_layer + '_' + prexif_name + ext + '.png'
    elif Net == 'InceptionV1':
        name_base = layer_to_print + kind_layer + '_' + prexif_name + ext + '.png'
    elif Net == 'InceptionV1_slim':
        name_base = layer_to_print + kind_layer + '_' + prexif_name + ext + '.png'
    else:
        raise NotImplementedError

    # input = a pair of arguments: the name of the layer and the index of
    # the feature (must be an integer)

    if cossim is True:
        raise NotImplementedError
        # Not finished: the S is still needed here.
        # This objective function is the product of the dot product between v
        # and the layers, multiplied by the cosine raised to a given power.
        obj_list = ([
            direction_neuron_cossim_S(layer, v, batch=n, S=S, cossim_pow=4)
            for n, v in enumerate(directions)
        ])
        total_obj = objectives.Objective.sum(obj_list)
    elif dot_vector:
        assert num_features is not None

        def inner(T):
            layer = T(obj_str)
            #print('num_features',num_features)
            if len(weights) == num_features:
                total_obj = tf.reduce_mean(layer * weights)
            else:
                weights_total = np.zeros((num_features, ))
                weights_total[index_features_withinLayer] = weights
                total_obj = tf.reduce_mean(layer * weights_total)

            return total_obj

        total_obj = inner
    else:
        C = lambda layer_i: objectives.channel(*layer_i)
        total_obj = None
        for i, weight_i in zip(index_features_withinLayer, weights):

            if total_obj is None:
                total_obj = weight_i * C((obj_str, i))
            else:
                total_obj += weight_i * C((obj_str, i))

    output_im = render.render_vis(
        lucid_net,
        total_obj,
        transforms=transforms,
        thresholds=[2048],
        param_f=param_f,
        #                                      optimizer=optimizer,
        use_fixed_seed=True)
    image = np.array(output_im[0][0] *
                     255)  # only one image in the batch
    name_output = os.path.join(path_output, name_base)
    new_output_path = os.path.join(path_output, 'RGB')

    if inverseAndSave:
        name_output = name_output.replace('.png', '_toRGB.png')
        image = image[:, :, [2, 1, 0]]
        tf.keras.preprocessing.image.save_img(name_output, image)

    else:
        tf.keras.preprocessing.image.save_img(name_output, image)
        pathlib.Path(new_output_path).mkdir(parents=True, exist_ok=True)
        change_from_BRG_to_RGB(img_name_path=name_output,
                               output_path=new_output_path,
                               ext_name='toRGB')
Example #17
    input_name = 'input'


def show_image(image):
    html = ""
    data_url = _image_url(image)
    html += '<img width="100" style="margin: 10px" src="' + data_url + '">'
    with open("img.html", "w") as f:
        f.write(html)
    _display_html(html)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--crop_size', type=int, default=128)
    parser.add_argument('--model_file', type=str, default='lucid_model.pb')
    args = parser.parse_args()

    model = LucidModel()
    model.model_path = args.model_file
    model.image_shape = [args.crop_size, args.crop_size, 3]

    print("Nodes in graph:")
    for node in model.graph_def.node:
        print(node.name)
    print("=" * 30)

    obj = objectives.channel("prediction/Conv2D", 0) - objectives.channel("prediction/Conv2D", 0)
    res = render.render_vis(model, obj, transforms=[])
    show_image(res)
Example #18
import lucid.optvis.param as param

from lucid.modelzoo.vision_base import Model


class FrozenNetwork(Model):
    model_path = network_protobuf_path
    image_shape = [256, 256, 3]
    image_value_range = (0, 1)
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

obj = objectives.channel(layer_name, neuron_index)
param_f = lambda: param.image(512, fft=True, decorrelate=True)
renders = render.render_vis(network, obj, param_f, thresholds=(2024, ))

last_image_file = sorted(glob.glob("projection/out/*step*.png"))[-1]
stylegan_render = imageio.imread(last_image_file)
lucid_render = renders[0][0]
lucid_render = (np.clip(lucid_render, 0, 1) * 255).astype(np.uint8)

h, w = lucid_render.shape[:2]
canvas = PIL.Image.new('RGB', (w * 2, h), 'white')
canvas.paste(Image.fromarray(lucid_render), (0, 0))
canvas.paste(
    Image.fromarray(stylegan_render).resize((w, h), PIL.Image.LANCZOS), (w, 0))
canvas.save("projection/combined_%s_%03d.png" %
            (layer_name.split("/")[0], neuron_index))
Example #19
def make_lucid_imagenet_dataset(
        model_tag,
        n_random=9,
        min_size=3,
        max_prop=0.8,
        display=True,
        infodir='/project/clusterability_in_neural_networks/results/',
        savedir='/project/clusterability_in_neural_networks/datasets/'):

    assert model_tag in VIS_NETS

    with open(infodir + model_tag + '_clustering_info.pkl', 'rb') as f:
        clustering_info = pickle.load(f)

    layer_names = clustering_info['layers']
    labels_in_layers = [
        np.array(lyr_labels) for lyr_labels in clustering_info['labels']
    ]
    layer_sizes = [len(labels) for labels in labels_in_layers]
    n_clusters = max([max(labels) for labels in labels_in_layers]) + 1

    if model_tag == 'vgg16':
        lucid_net = models.VGG16_caffe()
    elif model_tag == 'vgg19':
        lucid_net = models.VGG19_caffe()
    else:
        lucid_net = models.ResnetV1_50_slim()
    lucid_net.load_graphdef()
    layer_map = NETWORK_LAYER_MAP[model_tag]

    max_images = [
    ]  # to be filled with images that maximize cluster activations
    # min_images = []  # to be filled with images that minimize cluster activations
    random_max_images = [
    ]  # to be filled with images that maximize random units activations
    # random_min_images = []  # to be filled with images that minimize random units activations
    max_losses = []  # to be filled with losses
    # min_losses = []  # to be filled with losses
    random_max_losses = []  # to be filled with losses
    # random_min_losses = []  # to be filled with losses
    sm_sizes = []  # list of submodule sizes
    sm_layer_sizes = []
    sm_layers = []  # list of layer names
    sm_clusters = []  # list of clusters

    for layer_name, labels, layer_size in zip(layer_names, labels_in_layers,
                                              layer_sizes):

        if layer_name not in layer_map.keys():
            continue

        lucid_name = layer_map[layer_name]
        max_size = max_prop * layer_size

        for clust_i in range(n_clusters):

            sm_binary = labels == clust_i
            sm_size = sum(sm_binary)
            if sm_size <= min_size or sm_size >= max_size:  # skip if too big or small
                continue

            sm_sizes.append(sm_size)
            sm_layer_sizes.append(layer_size)
            sm_layers.append(layer_name)
            sm_clusters.append(clust_i)

            print(f'{model_tag}, layer names: {layer_name}, {lucid_name}')
            print(f'submodule_size: {sm_size}, layer_size: {layer_size}')

            sm_idxs = [i for i in range(layer_size) if sm_binary[i]]
            max_obj = sum(
                [objectives.channel(lucid_name, unit) for unit in sm_idxs])
            # min_obj = -1 * sum([objectives.channel(lucid_name, unit) for unit in sm_idxs])

            max_im, max_loss = render_vis_with_loss(lucid_net,
                                                    max_obj,
                                                    size=IMAGE_SIZE_IMAGENET,
                                                    thresholds=(256, ))
            max_images.append(max_im)
            max_losses.append(max_loss)
            # min_im, min_loss = render_vis_with_loss(lucid_net, min_obj)
            # min_images.append(min_im)
            # min_losses.append(min_loss)
            if display:
                print(f'loss: {round(max_loss, 3)}')
                show(max_im)

            rdm_losses = []
            rdm_ims = []
            for _ in range(n_random):  # random max results
                rdm_idxs = np.random.choice(np.array(range(layer_size)),
                                            size=sm_size,
                                            replace=False)
                random_max_obj = sum([
                    objectives.channel(lucid_name, unit) for unit in rdm_idxs
                ])
                random_max_im, random_max_loss = render_vis_with_loss(
                    lucid_net,
                    random_max_obj,
                    size=IMAGE_SIZE_IMAGENET,
                    thresholds=(256, ))
                random_max_images.append(random_max_im)
                random_max_losses.append(random_max_loss)
                rdm_losses.append(round(random_max_loss, 3))
                rdm_ims.append(np.squeeze(random_max_im))
            if display:
                print(f'random losses: {rdm_losses}')
                show(np.hstack(rdm_ims))

            # for _ in range(n_random):  # random min results
            #     rdm_idxs = np.random.choice(np.array(range(layer_size)), size=sm_size, replace=False)
            #     random_min_obj = -1 * sum([objectives.channel(lucid_name, unit) for unit in rdm_idxs])
            #     random_min_im, random_min_loss = render_vis_with_loss(lucid_net, random_min_obj)
            #     random_min_images.append(random_min_im)
            #     random_min_losses.append(random_min_loss)

    max_images = np.squeeze(np.array(max_images))
    # min_images = np.squeeze(np.array(min_images))
    random_max_images = np.squeeze(np.array(random_max_images))
    # random_min_images = np.squeeze(np.array(random_min_images))
    max_losses = np.array(max_losses)
    # min_losses = np.array(min_losses)
    random_max_losses = np.array(random_max_losses)
    # random_min_losses = np.array(random_min_losses)

    results = {
        'max_images': max_images,  # 'min_images': min_images,
        'random_max_images':
        random_max_images,  # 'random_min_images': random_min_images,
        'max_losses': max_losses,  # 'min_losses': min_losses,
        'random_max_losses':
        random_max_losses,  # 'random_min_losses': random_min_losses,
        'sm_sizes': sm_sizes,
        'sm_layer_sizes': sm_layer_sizes,
        'sm_layers': sm_layers,
        'sm_clusters': sm_clusters
    }

    with open(savedir + model_tag + '_max_data.pkl', 'wb') as f:
        pickle.dump(results, f)
Example #20
def make_lucid_dataset(
        model_tag,
        lucid_net,
        all_labels,
        is_unpruned,
        transforms=[],
        n_random=9,
        min_size=5,
        max_prop=0.8,
        display=True,
        savedir='/project/clusterability_in_neural_networks/datasets/',
        savetag=''):

    if 'cnn' in model_tag.lower():
        cnn_params = CNN_VGG_MODEL_PARAMS if 'vgg' in str(
            model_tag).lower() else CNN_MODEL_PARAMS
        layer_sizes = [cl['filters'] for cl in cnn_params['conv']]
        layer_names = ['conv2d/Relu'] + [
            f'conv2d_{i}/Relu' for i in range(1, len(layer_sizes))
        ]
    else:  # it's an mlp
        layer_sizes = [256, 256, 256, 256]
        layer_names = ['dense/Relu'] + [
            f'dense_{i}/Relu' for i in range(1, len(layer_sizes))
        ]
    if not is_unpruned:
        layer_names = ['prune_low_magnitude_' + ln for ln in layer_names]

    labels_in_layers = [
        np.array(lyr_labels)
        for lyr_labels in list(splitter(all_labels, layer_sizes))
    ]

    max_images = [
    ]  # to be filled with images that maximize cluster activations
    random_max_images = [
    ]  # to be filled with images that maximize random units activations
    max_losses = []  # to be filled with losses
    random_max_losses = []  # to be filled with losses
    sm_sizes = []  # list of submodule sizes
    sm_layer_sizes = []
    sm_layers = []  # list of layer names
    sm_clusters = []  # list of clusters

    imsize = IMAGE_SIZE_CIFAR10 if 'vgg' in model_tag.lower() else IMAGE_SIZE

    for layer_name, labels, layer_size in zip(layer_names, labels_in_layers,
                                              layer_sizes):

        max_size = max_prop * layer_size

        for clust_i in range(max(all_labels) + 1):

            sm_binary = labels == clust_i
            sm_size = sum(sm_binary)
            if sm_size <= min_size or sm_size >= max_size:  # skip if too big or small
                continue

            sm_sizes.append(sm_size)
            sm_layer_sizes.append(layer_size)
            sm_layers.append(layer_name)
            sm_clusters.append(clust_i)

            # print(f'{model_tag}, layer: {layer_name}')
            # print(f'submodule_size: {sm_size}, layer_size: {layer_size}')

            sm_idxs = [i for i in range(layer_size) if sm_binary[i]]
            max_obj = sum(
                [objectives.channel(layer_name, unit) for unit in sm_idxs])

            max_im, max_loss = render_vis_with_loss(lucid_net,
                                                    max_obj,
                                                    size=imsize,
                                                    transforms=transforms)
            max_images.append(max_im)
            max_losses.append(max_loss)
            if display:
                print(f'loss: {round(max_loss, 3)}')
                show(max_im)

            rdm_losses = []
            rdm_ims = []
            for _ in range(n_random):  # random max results
                rdm_idxs = np.random.choice(np.array(range(layer_size)),
                                            size=sm_size,
                                            replace=False)
                random_max_obj = sum([
                    objectives.channel(layer_name, unit) for unit in rdm_idxs
                ])
                random_max_im, random_max_loss = render_vis_with_loss(
                    lucid_net,
                    random_max_obj,
                    size=imsize,
                    transforms=transforms)
                random_max_images.append(random_max_im)
                random_max_losses.append(random_max_loss)
                rdm_ims.append(np.squeeze(random_max_im))
                rdm_losses.append(round(random_max_loss, 3))
            if display:
                print(f'random losses: {rdm_losses}')
                show(np.hstack(rdm_ims))

    max_images = np.squeeze(np.array(max_images))
    random_max_images = np.squeeze(np.array(random_max_images))
    max_losses = np.array(max_losses)
    random_max_losses = np.array(random_max_losses)

    results = {
        'max_images': max_images,
        'random_max_images': random_max_images,
        'max_losses': max_losses,
        'random_max_losses': random_max_losses,
        'sm_sizes': sm_sizes,
        'sm_layer_sizes': sm_layer_sizes,
        'sm_layers': sm_layers,
        'sm_clusters': sm_clusters
    }

    if is_unpruned:
        suff = '_unpruned_max_data'
    else:
        suff = '_pruned_max_data'

    with open(savedir + model_tag + suff + savetag + '.pkl', 'wb') as f:
        pickle.dump(results, f)
Example #21
    image_value_range = (0, 1)
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

if LAYER == "-":
    images = []
    layers = []
    for l in sys.stdin:
        layers.append(l.strip())

    for layer in layers:
        for i in range(COLUMNS):
            obj = objectives.channel(layer, i)
            renders = render.render_vis(network, obj)
            assert len(renders) == 1
            image = renders[0]
            assert len(image) == 1
            image = image[0]
            images.append(image)
    images = np.array(images)
    height, width = 128, 128
    rows = len(layers)
    print(images.shape)
    assert images.shape == (rows * COLUMNS, 128, 128, 3)
    grid = (images.reshape(rows, COLUMNS, height, width,
                           3).swapaxes(1, 2).reshape(height * rows,
                                                     width * COLUMNS, 3))
    scipy.misc.imsave(OUTPUT_PREFIX + ".png", grid)
Example #22
        print(t_input, t_prep_input)
        tf.import_graph_def(self.graph_def, {self.input_name: t_prep_input},
                            name=scope)
        self.post_import(scope)


# lucid_model = vision_base.Model()
# lucid_model.model_path = tf_model
lucid_model = PbModel()
lucid_model.load_graphdef()

temp_graph_def = graph.as_graph_def()

# print(lucid_model.graph_def)

# model = models.InceptionV1().load_graphdef()
# print(model)
# print(dir(model))

obj = objectives.channel("discriminator/dis_n_conv_1_4/Conv2D", 2)
param_f = lambda: tf.concat([
    param.rgb_sigmoid(param.naive([1, 128, 128, 3])),
    param.fancy_colors(param.naive([1, 128, 128, 8]) / 1.3),
    param.rgb_sigmoid(param.laplacian_pyramid([1, 128, 128, 3]) / 2.),
    param.fancy_colors(param.laplacian_pyramid([1, 128, 128, 8]) / 2. / 1.3),
], 0)
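# The concat along axis 0 yields a four-image batch, so the four
# parameterizations above are optimized side by side in a single run.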
render.render_vis(lucid_model, obj, param_f)

# _ = render.render_vis(lucid_model, "discriminator/dis_n_conv_1_4/Conv2D:0")
Example #23
def test_channel(inceptionv1):
    objective = objectives.channel("mixed4a_pre_relu", 42)
    assert_gradient_ascent(objective, inceptionv1)
Example #24
        if keras_model.layers[i].name == "conv2d_{}".format(layer_idx):
            keras_layer_idx = i
            break
    layer_name = "conv2d_{}/convolution".format(layer_idx)
    filter_idx = args.filter_idx
    n_of_filter = args.num_of_filters

    if filter_idx is None:
        filters = [
            int(idx) for idx in get_strongest_filter(inceptionv3, layer_name,
                                                     x, n_of_filter)
        ]
    else:
        filters = [filter_idx]

    for idx in filters:
        obj = objectives.channel(layer_name, idx)

        file_name = "Conv{}_{}".format(layer_idx, idx)
        img_file = file_name + ".png"

        render_vis(inceptionv3, obj, img_file, idx)
        saliency_img = visualize_saliency(keras_model,
                                          keras_layer_idx,
                                          filter_indices=idx,
                                          seed_input=y)
        plt.imshow(img)
        plt.imshow(saliency_img, alpha=.7)
        plt.axis('off')
        plt.savefig("{}_saliency.png".format(file_name))
Example #25
def get_vm_model_image_losses(args, layer=None):
    model_name_scope = None
    if args.model_type == 'vm_model':
        model_name_scope = 'encode'
    elif args.model_type == 'simclr_model':
        model_name_scope = 'base_model'
    else:
        raise NotImplementedError('Model type %s not supported!' %
                                  args.model_type)

    def model(t_image):
        t_image = t_image * 255
        ending_points, _ = get_network_outputs({'images': t_image},
                                               prep_type=args.prep_type,
                                               model_type=args.model_type,
                                               setting_name=args.setting_name,
                                               module_name=['encode'],
                                               **json.loads(args.cfg_kwargs))

        all_vars = tf.global_variables()
        var_list = [x for x in all_vars if x.name.startswith(model_name_scope)]
        saver = tf.train.Saver(var_list=var_list)

        if not args.from_scratch:
            if not args.load_from_ckpt:
                model_ckpt_path = tf_model_loader.load_model_from_mgdb(
                    db=args.load_dbname,
                    col=args.load_colname,
                    exp=args.load_expId,
                    port=args.load_port,
                    cache_dir=args.model_cache_dir,
                    step_num=args.load_step,
                )
            else:
                model_ckpt_path = args.load_from_ckpt
            saver.restore(tf.get_default_session(), model_ckpt_path)
        else:
            SESS = tf.get_default_session()
            init_op_global = tf.global_variables_initializer()
            SESS.run(init_op_global)
            init_op_local = tf.local_variables_initializer()
            SESS.run(init_op_local)

        all_train_ref = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)

        def _remove_others(vars_ref):
            cp_vars_ref = copy.copy(vars_ref)
            for each_v in cp_vars_ref:
                if each_v.op.name.startswith(model_name_scope):
                    vars_ref.remove(each_v)

        _remove_others(all_train_ref)
        return ending_points

    layer = layer or "encode_9"
    batch_size = 16
    param_f = lambda: param.image(224, batch=batch_size)
    num_of_units = 64
    images = []
    all_losses = []

    for start_idx in range(0, num_of_units, batch_size):
        obj = objectives.channel(layer, 0 + start_idx, 0)
        for idx in range(1, batch_size):
            obj += objectives.channel(layer, idx + start_idx, idx)
        image, losses = render_vis(model,
                                   obj,
                                   param_f,
                                   model_name_scope=model_name_scope)
        images.append(image)
        all_losses.append(losses)
    images = np.concatenate(images, axis=0)
    all_losses = np.sum(all_losses, axis=0)
    return layer, images, all_losses
Example #26
                    type=str,
                    help='path to output folder')
parser.add_argument('--layer', default='conv5_block3_out/add')
parser.add_argument('--depth', default=2048, type=int)
args = parser.parse_args()

if __name__ == '__main__':
    model = CovidNetB()
    model.load_graphdef()

    for i in range(args.depth):
        param_f = lambda: tf.tile(
            tf.math.reduce_mean(param.image(128, fft=True),
                                axis=3,
                                keepdims=True), [1, 1, 1, 3])
        obj = -objectives.channel(args.layer, i)

        deprecation._PRINT_DEPRECATION_WARNINGS = False
        tf.logging.set_verbosity(tf.logging.ERROR)

        print("\nProgress: " + str(i) + '/' + str(args.depth) + '\n')

        img = render.render_vis(model,
                                obj,
                                param_f,
                                transforms=transform.standard_transforms,
                                thresholds=[
                                    200,
                                ])
        img = np.uint8(255 * (img[0]))
        cv2.imwrite(
Example #27

class FrozenNetwork(Model):
    model_path = MODEL_PATH
    image_shape = [224, 224, 3]
    image_value_range = (0, 1)
    input_name = 'input_1'


network = FrozenNetwork()
network.load_graphdef()

#for layer in network:
#    print(layer.get_shape())

pixels = 224

param_f = lambda: param.image(pixels, fft=True, decorrelate=True)
#obj_test = objectives.channel(LAYER, NEURON_INDEX).get_shape()
#print(obj_test)
obj = objectives.channel(LAYER, NEURON_INDEX)
images = render.render_vis(network, obj, param_f, thresholds=(1024, ))
assert len(images) == 1
image = images[0]
assert len(image) == 1
image = image[0]

out_filename = LAYER.replace(
    "/", "-") + "_" + str(NEURON_INDEX) + "_" + MODEL_PATH.zfill(10) + ".png"
scipy.misc.imsave(out_filename, image)
Example #28
def build_sprites(local_root='.',
                  graph_version=None,
                  model_loader=None,
                  vis=None,
                  layers=None,
                  vis_filename='vis.js'):

    if graph_version is None:
        raise ValueError("graph_version cannot be None")

    if model_loader is None:
        raise ValueError("model_loader cannot be None")

    graph_version_path = os.path.join(local_root, graph_version)

    if not os.path.isdir(graph_version_path):
        raise ValueError(
            "No graph vis directory: {}".format(graph_version_path))

    if vis is None:
        vis = {}

    update_dict_from_json(json_path=os.path.join(graph_version_path,
                                                 vis_filename),
                          updatee=vis)

    #
    graph_steps = vis['steps'] if 'steps' in vis else []

    if len(graph_steps) == 0:
        print("no graph instances")
        return

    for graph_step in graph_steps:

        graph_step_dir = os.path.join(graph_version_path, graph_step)
        image_dir = os.path.join(graph_step_dir, 'sprites')
        image_consumed_dir = os.path.join(graph_step_dir, 'sprites_consumed')
        image_scum_dir = os.path.join(graph_step_dir, 'sprites_scum')
        sprite_map_dir = os.path.join(graph_step_dir, 'spritemaps')

        log_path = os.path.join(graph_step_dir, 'losses.csv')

        if not os.path.isdir(graph_step_dir):
            os.mkdir(graph_step_dir)
        if not os.path.isdir(image_dir):
            os.mkdir(image_dir)
        if not os.path.isdir(image_consumed_dir):
            os.mkdir(image_consumed_dir)
        if not os.path.isdir(sprite_map_dir):
            os.mkdir(sprite_map_dir)

        sprite_dirs = [
            d for d in [image_dir, image_consumed_dir, image_scum_dir]
            if os.path.isdir(d)
        ]

        # graph step specific config
        step_vis = deepcopy(vis)

        update_dict_from_json(json_path=os.path.join(graph_step_dir,
                                                     vis_filename),
                              updatee=step_vis)

        #
        max_index = step_vis['max_index'] if 'max_index' in step_vis else 2048
        scale = step_vis['scale'] if 'scale' in step_vis else 64
        thresholds = step_vis['thresholds'] if 'thresholds' in step_vis else [
            64
        ]
        vis_loss = step_vis['loss'] if 'loss' in step_vis else {}

        batch_id = get_next_batch_id(loss_log_path=log_path)

        # drives off model json - as might be customised
        graph_model = get_graph_model(graph_version=graph_version,
                                      model_loader=model_loader)

        layers = graph_model['layers']

        # if not None and not empty then only build sprites for these layers/indexes
        if 'target_layers' in vis:
            target_layers = vis['target_layers']
            layers = [
                layer for layer in layers if target_layers is None
                or len(target_layers) == 0 or layer['index'] in target_layers
            ]

        target_indexes = [] if 'target_indexes' not in vis else vis[
            'target_indexes']

        use_cppn = 'param' in vis and vis['param'] == 'cppn'

        # load existing sprite details
        existing_sprite_details = get_existing_sprite_details(
            sprite_dirs=sprite_dirs, scale=scale)

        print("\nBUILDING SPRITES: graph_version={} steps={}".format(
            graph_version, graph_step))
        print("   layers={}".format([layer['index'] for layer in layers]))

        for layer in layers:

            layer_name = layer['name']
            layer_index = layer['index']

            adam = layer['adam']
            transform_id = layer['transform_id']

            model = None

            optimizer = tf.train.AdamOptimizer(adam)
            transforms = get_transforms(transform_id)

            #
            existing_layer_sprites = existing_sprite_details[
                layer_index] if layer_index in existing_sprite_details else []

            try:
                print("\nLAYER: {}\n".format(layer))

                num_processed = 0

                for index in range(0, max_index):

                    # check for abort in vis files
                    vf_abort = check_abort(dirs=[
                        os.path.join(graph_version_path, vis_filename),
                        os.path.join(graph_step_dir, vis_filename)
                    ])

                    if len(vf_abort) > 0:
                        print("\nDetected abort in vis files: {}".format(
                            vf_abort))
                        return

                    # check any target indexes
                    if not (target_indexes is None or len(target_indexes) == 0
                            or index in target_indexes):
                        continue

                    #
                    existing_index_sprite_thresholds = existing_layer_sprites[
                        index] if index in existing_layer_sprites else []

                    # calculate work to do
                    # do all thresholds already existing
                    thresholds_to_generate = [
                        t for t in thresholds
                        if t not in existing_index_sprite_thresholds
                    ]

                    if len(thresholds_to_generate) == 0:
                        continue

                    # can start from an existing threshold
                    max_existing_threshold = max(
                        existing_index_sprite_thresholds
                    ) if len(existing_index_sprite_thresholds) > 0 else None

                    if max_existing_threshold is not None and max_existing_threshold <= min(
                            thresholds_to_generate):

                        threshold_start = max_existing_threshold + 1

                        img_path = [
                            ip for ip in [
                                get_image_path(sd, layer_index, index,
                                               max_existing_threshold, scale)
                                for sd in sprite_dirs
                            ] if os.path.isfile(ip)
                        ][0]

                        with Image.open(img_path) as im:
                            im.load()

                            # make array
                            im_1 = np.array(im)

                        # add dummy batch dimension
                        im_2 = np.expand_dims(im_1, axis=0)

                        # reduce less than one
                        init_val = im_2.astype(np.float32) / 256

                        param_f = lambda: lucid_images.image(
                            scale, fft=False, init_val=init_val)

                    elif use_cppn:
                        threshold_start = 0
                        adam = 0.00055
                        optimizer = tf.train.AdamOptimizer(adam)
                        param_f = lambda: param.cppn(scale)

                    else:
                        threshold_start = 0
                        param_f = lambda: param.image(
                            scale, fft=True, decorrelate=True)

                    # drop the model regularly
                    if num_processed % 100 == 0:
                        print("Reloading model ...")
                        model = None
                        num_processed = 0

                    if model is None:
                        model = model_loader(graph_step)
                        model.load_graphdef()

                    # start the feature
                    print("\nFEATURE: {}:{}\n".format(layer['name'], index))

                    log_item = {
                        "batch_id": batch_id,
                        "timestamp": current_milli_time(),
                        "scale": scale,
                        "adam": adam,
                        "transforms": transform_id,
                        "layer": layer_index,
                        "index": index
                    }

                    visualizations = []

                    try:
                        visualization = get_visualizations_and_losses(
                            model,
                            objectives.channel(layer_name, index),
                            transforms=transforms,
                            param_f=param_f,
                            optimizer=optimizer,
                            threshold_start=threshold_start,
                            thresholds=thresholds,
                            visualization_index=index,
                            visualization_layer=layer_index,
                            minimum_loss=vis_loss['minimum_loss_threshold']
                            if 'minimum_loss_threshold' in vis_loss else 0,
                            num_bins=vis_loss['num_bins']
                            if 'num_bins' in vis_loss else 0,
                            max_bin_hits=vis_loss['max_bin_hits']
                            if 'max_bin_hits' in vis_loss else 0,
                            bin_factor=vis_loss['bin_factor']
                            if 'bin_factor' in vis_loss else 0,
                            loss_logger=lambda l, t, s: loss_logger(
                                log_file=log_path,
                                item=log_item,
                                threshold=t,
                                loss=l,
                                status=s))

                        num_processed = num_processed + 1

                        if len(visualization) == 0:
                            continue

                        # check losses
                        losses = [v[2] for v in visualization]

                        print("\nLOSSES: feature={}:{}; {}\n".format(
                            layer_index, index, losses))

                        visualizations.append(visualization)

                    finally:
                        if len(visualizations) > 0:
                            store_visualizations_and_losses(
                                visualizations,
                                output_dir=image_dir,
                                scale=scale)

            except ValueError as e:
                msg = "{}".format(e)
                if 'slice index' in msg and 'out of bounds' in msg:
                    print(
                        "Closing layer: slice index out of bounds: {}".format(
                            e))
                else:
                    raise e