Example #1
0
def main():
    """Render activation-maximisation images for four leaky-ReLU layers.

    Loads the model, swaps the final softmax for a linear activation
    (so gradients are meaningful for activation maximisation), then for
    each listed layer renders 16 randomly chosen filters and saves a
    stitched 8-column image as Filter_<n>.png.
    """
    model = load_model('model2.h5py')
    model.summary()

    # Replace the last layer's softmax with a linear activation.
    model.layers[-1].activation = activations.linear
    model = utils.apply_modifications(model)

    layer_names = ['leaky_re_lu_1', 'leaky_re_lu_2', 'leaky_re_lu_3', 'leaky_re_lu_4']
    for lid, layer_name in enumerate(layer_names):
        layer_idx = utils.find_layer_idx(model, layer_name)

        # Shuffle all filter indices of this layer, then take 16.
        shuffled = _shuffle(np.arange(get_num_filters(model.layers[layer_idx])))

        vis_images = []
        for pos in range(16):
            generated = visualize_activation(model,
                                             layer_idx,
                                             filter_indices=shuffled[pos],
                                             tv_weight=0.,
                                             input_modifiers=[Jitter(0.5)])
            vis_images.append(generated)

        # Stitch the 16 images into an 8-column palette and save it.
        stitched = utils.stitch_images(vis_images, cols=8)
        plt.figure()
        plt.axis('off')
        rows, cols = stitched.shape[0], stitched.shape[1]
        stitched = stitched.reshape((rows, cols))
        plt.imshow(stitched)
        plt.title(layer_name)
        plt.tight_layout()
        plt.savefig('Filter_{}.png'.format(lid))
Example #2
0
    def visualize_filters(model,
                          layer_name,
                          input_data,
                          filter_indices=None,
                          mode="guided"):
        """
        Visualize what pattern activates a filter. Helps to discover what a
        filter might be computing.

        :param model: keras model to inspect
        :param layer_name: name of the layer whose filters are visualized
        :param input_data: iterable of input samples to compute saliency for
        :param filter_indices: filter indices forwarded to visualize_saliency
            (None means all filters are considered)
        :param mode: backprop modifier, e.g. "guided"
        :returns tuple(List, List) containing input images and heatmaps
                frames from each sample is stitched into a single image
        """
        # Imports moved below the docstring (a string after statements is not
        # a docstring); unused imports (get_num_filters, Jitter) and the
        # stray no-op `get_num_filters` expression were removed.
        from vis.visualization import visualize_saliency
        from vis.utils import utils

        inputs = []
        outputs = []
        layer_idx = utils.find_layer_idx(model, layer_name)
        for sample in input_data:
            heatmaps = visualize_saliency(
                model,
                layer_idx,
                filter_indices=filter_indices,
                seed_input=sample,
                backprop_modifier=mode,
            )
            inputs.append(utils.stitch_images(sample, margin=0))
            outputs.append(utils.stitch_images(heatmaps, margin=0))

        return np.array(inputs), np.array(outputs)
Example #3
0
def layer_visualisation(model, layer, start, end):
    """Render activation-maximisation images for filter indices start..end
    of *layer* (bounded by the 10 randomly selected filters) and save them
    under static/img/<layer>/, skipping files that already exist."""
    layer_idx = utils.find_layer_idx(model, layer)
    chosen = np.random.permutation(get_num_filters(
        model.layers[layer_idx]))[:10]
    available = len(chosen)
    # create a folder in static/img/ for every layers
    folder = "static/img/" + layer + '/'
    if not os.path.isdir(folder):
        os.makedirs(folder)
    for filter_no in range(start, end + 1):
        # Guard clauses: skip out-of-range filters and existing images.
        if filter_no >= available:
            continue
        namePath = img_name(layer, filter_no)
        if os.path.exists(namePath):
            print(namePath + ' already exists' +
                  str(os.path.exists(namePath)))
            continue
        plt.rcParams['figure.figsize'] = (18, 6)
        rendered = visualize_activation(model,
                                        layer_idx,
                                        filter_indices=filter_no)
        pil_img = Image.fromarray(rendered)
        drawer = ImageDraw.Draw(pil_img)
        # Overlay "<layer> Filter: <n>" in yellow on the image.
        drawer.text((10, 10),
                    layer + " Filter: " + str(filter_no),
                    fill=(255, 255, 0))
        print(layer + " Filter: " + str(filter_no))
        pil_img.save(namePath)
        print('created ' + namePath)
Example #4
0
def check_get_selected_filters(model, layer, filters):
    """Validate a filter selection string for the given layer.

    :param model: keras model whose layer is inspected
    :param layer: index into model.layers
    :param filters: "all" to select every filter, or a string/number naming
        a single filter index
    :returns: None for "all", otherwise the validated int filter index
    Exits the process (status 1) on an invalid or out-of-range selection.
    """
    if filters == "all":
        return None
    # Bug fix: a non-numeric string used to raise an unhandled ValueError
    # instead of reaching the intended error path below.
    try:
        idx = int(filters)
    except (TypeError, ValueError):
        print("There is no such filter.")
        sys.exit(1)
    if 0 <= idx < get_num_filters(model.layers[layer]):
        return idx
    print("There is no such filter.")
    sys.exit(1)
Example #5
0
def handle_data():
    """Flask handler: render activation visualisations for the layer and
    optional filter number submitted by the form, then re-render index().

    Reads 'layer_select' and 'filter_nb' from the posted form; an empty
    filter number renders the first batch of filters, a digit renders
    exactly that filter.
    """
    layer = request.form.get('layer_select')
    filter_str = request.form['filter_nb']
    layer_idx = utils.find_layer_idx(model, layer)
    # Number of (up to 10) randomly selected filters available for this layer.
    filters = len(
        np.random.permutation(get_num_filters(model.layers[layer_idx]))[:10])
    if filters > 0:
        if len(filter_str) == 0:
            # No specific filter requested: render filters 0..filters.
            visu.layer_visualisation(model, layer, 0, filters)
        elif filter_str.isdigit():
            filter_nb = int(filter_str)
            visu.layer_visualisation(model, layer, filter_nb, filter_nb)
    else:
        # Bug fix: concatenating the int `filters` to a str raised TypeError.
        print("filters nb : " + str(filters))
    return (index())
def visualize_conv_filters(output_dir, model, layer_name):
  """Render an activation-maximisation image for every filter of
  *layer_name* and save them as one stitched PNG in *output_dir*.

  :param output_dir: directory the stitched <layer_name>.png is written to
  :param model: keras model to visualize
  :param layer_name: name of the conv layer whose filters are rendered
  """
  layer_idx = utilsvis.find_layer_idx(model, layer_name)

  filters = np.arange(get_num_filters(model.layers[layer_idx]))
  vis_images = []

  for idx in tqdm(filters):
      img = visualize_activation(model, layer_idx, filter_indices=idx)
      # Overlay the filter index on each tile.
      img = utilsvis.draw_text(img, 'Filter {}'.format(idx))
      vis_images.append(img)

  stitched = utilsvis.stitch_images(vis_images, cols=32)

  # Bug fix: the '{}' placeholder was never substituted, so every layer's
  # image was written to the literal file '{}.png'.
  path = os.path.join(output_dir, '{}.png'.format(layer_name))
  imsave(path, stitched)
Example #7
0
def vis_5():
    """Save an activation-maximisation image for every filter of conv2d_1."""
    from vis.visualization import get_num_filters

    # Layer is selected by name, as defined in the model.
    target_layer = 'conv2d_1'
    target_idx = utils.find_layer_idx(model, target_layer)

    # One generated input image per filter in the layer.
    n_filters = get_num_filters(model.layers[target_idx])
    for filter_no in range(n_filters):
        print('filter_indices: ' + str(filter_no))
        generated = visualize_activation(model, target_idx,
                                         filter_indices=filter_no)
        save_image(generated, target_layer + '_filter_' + str(filter_no) + '.png')
Example #8
0
def visualize_all_activations_3d(model, destination_path):
    """
    Get all activations of all Conv3D layers for each filter.
    Saving 3D activations in destination_path as .gif files in
    separate folders for each convolutional layer during the process.

    :param model: keras sequential model with Conv3D layers
    :param destination_path: path where to save gifs
    :return: list with one entry per Conv3D layer, each a list of that
        layer's per-filter activation arrays (in layer order)
    """
    # Indices of all Conv3D layers, in model order.
    conv_layer_indices = [
        i for i, layer in enumerate(model.layers)
        if type(layer) is keras.layers.convolutional.Conv3D
    ]

    all_activations = []
    for layer_idx in conv_layer_indices:
        layer_name = model.layers[layer_idx].name
        gif_folder = destination_path + "/" + layer_name + "/"
        # Create a directory for each convolutional layer.
        # Bug fix: previously the whole activation loop was nested inside
        # this guard, so layers whose folder already existed were silently
        # skipped and the returned list was incomplete.
        if not os.path.exists(gif_folder):
            os.makedirs(gif_folder)

        activations = []
        # Bug fix: get_num_filters returns an int (see the other call sites,
        # which wrap it in np.arange), so iterate range() rather than the
        # int itself, which raised TypeError.
        for f in range(get_num_filters(model.layers[layer_idx])):
            # Generate the activation-maximisation volume for this filter.
            activation = visualize_activation(
                model, layer_idx=layer_idx, filter_indices=f)
            activations.append(activation)
            # Save the volume as an animated gif, one frame per slice.
            clip = ImageSequenceClip(list(activation), fps=10).resize(1.0)
            gif_name = layer_name + "_" + "activation" + str(f) + ".gif"
            clip.write_gif(gif_folder + gif_name, fps=10)
        all_activations.append(activations)
    return all_activations
Example #9
0
def plt_activation(model_path, layer_idx=-1, max_iter=200, **kwargs):
    """
    Plot activation of a given layer in a model by generating an image that
    maximizes the output of all `filter_indices` in the given `layer_idx`.

    Args:
        model_path: Path to the model file.
        layer_idx: Index of the layer to plot.
        max_iter: Maximum number of iterations to generate the input image.
        kwargs: Unused arguments.

    Returns:
        None. The generated image is displayed with matplotlib.

    Raises:
        TypeError: if the selected layer is neither Dense nor Conv2D.
    """
    model = import_model(model_path)
    model = prediction_layer_linear_activation(model)
    layer = model.layers[layer_idx]
    # isinstance instead of `type(...) ==` (idiomatic, and accepts
    # subclasses of Dense/Conv2D as well).
    if isinstance(layer, keras.layers.Dense):
        img = vvis.visualize_activation(model,
                                        layer_idx,
                                        max_iter=max_iter,
                                        filter_indices=None)
    elif isinstance(layer, keras.layers.Conv2D):
        filters = np.arange(vvis.get_num_filters(layer))

        # Generate one input image per filter.
        vis_images = []
        for idx in tqdm.tqdm(filters):
            act_img = vvis.visualize_activation(model,
                                                layer_idx,
                                                max_iter=max_iter,
                                                filter_indices=idx)

            vis_images.append(act_img)

        # Generate stitched image palette with 8 cols.
        img = vutils.stitch_images(vis_images, cols=8)
    else:
        raise TypeError("Invalid Layer type. model.layers[{}] is {}, "
                        "only Dense and Conv2D layers can be used".format(
                            str(layer_idx),
                            str(type(model.layers[layer_idx]))))
    plt.axis("off")
    # 2-D or single-channel output is rendered as grayscale.
    if len(img.shape) == 2 or img.shape[2] == 1:
        plt.imshow(img.reshape(img.shape[0:2]), cmap="gray")
    else:
        plt.imshow(img)
    plt.show()
def layered_actmax(sample_count):
    """For every layer named in layerlist_for_layered_actmax, render
    activation-maximisation images for sample_count randomly drawn filters
    and save each as <layer>_<filter>.jpg under IMG_DIR."""
    for layer_nm in layerlist_for_layered_actmax:
        layer_idx = utils.find_layer_idx(model, layer_nm)
        num_filters = get_num_filters(model.layers[layer_idx])
        # Sample filter ids with replacement.
        drawn_filters = random.choices(np.arange(num_filters), k=sample_count)
        for filter_id in drawn_filters:
            rendered = visualize_activation(model,
                                            layer_idx,
                                            filter_indices=filter_id,
                                            input_modifiers=[Jitter(16)])
            rendered = rendered.reshape(IMG_SIZE, IMG_SIZE, IMG_DEPTH)
            # OpenCV output is BGR; convert for matplotlib display/saving.
            rendered = cv2.cvtColor(rendered, cv2.COLOR_BGR2RGB)
            plt.imshow(rendered, cmap='gray')
            img_path = os.path.join(IMG_DIR,
                                    layer_nm + '_' + str(filter_id) + '.jpg')
            plt.imsave(img_path, rendered)
            print(f'Saved layer {layer_nm}/{filter_id} to file!')
    print('done!')
Example #11
0
def Get_Attributions(model,
                     data,
                     penultimate_layer_idx=-10,
                     layer_of_interest_idx=-1,
                     colormap='jet',
                     filter_indices=0,
                     mode='grad-cam'):
    """Compute colour-mapped attribution maps for *data*.

    :param model: keras model to attribute
    :param data: seed input for the attribution methods
    :param penultimate_layer_idx: layer index used by grad-cam
    :param layer_of_interest_idx: layer whose outputs are attributed
    :param colormap: matplotlib colormap name
    :param filter_indices: filter index/indices to attribute
    :param mode: 'grad-cam', 'saliency' or 'activations'
    :returns: a colour-mapped heatmap array; for mode='activations', a list
        with one heatmap per filter in the layer of interest
    :raises ValueError: if *mode* is not one of the supported modes
    """
    from vis.visualization import visualize_cam, visualize_saliency, visualize_activation, get_num_filters

    cmap = plt.cm.get_cmap(colormap)

    def _to_heatmap(grads):
        # Min-max normalise into [0, 1], then apply the colormap.
        grads = (grads - np.min(grads)) / (np.max(grads) - np.min(grads))
        return cmap(grads)

    if mode == 'grad-cam':
        heatmap = _to_heatmap(
            visualize_cam(model,
                          layer_of_interest_idx,
                          filter_indices=filter_indices,
                          seed_input=data,
                          penultimate_layer_idx=penultimate_layer_idx,
                          backprop_modifier='guided'))
    elif mode == 'saliency':
        heatmap = _to_heatmap(
            visualize_saliency(model,
                               layer_of_interest_idx,
                               filter_indices=filter_indices,
                               seed_input=data,
                               backprop_modifier='guided'))
    elif mode == 'activations':
        vis_activations = []
        filters = np.arange(
            0, get_num_filters(model.layers[layer_of_interest_idx]))
        for idx in filters:
            grads = visualize_activation(model,
                                         layer_of_interest_idx,
                                         filter_indices=idx)
            vis_activations.append(_to_heatmap(grads))
        heatmap = vis_activations
    else:
        # Bug fix: an unknown mode used to fall through and raise NameError
        # on `return heatmap`; fail explicitly instead.
        raise ValueError("Unknown mode: {!r}".format(mode))
    return heatmap
Example #12
0
def plot_activations(model, layer_name, num):
    """Show a stitched 8-column palette of activation-maximisation images
    for the first *num* filters of *layer_name*."""
    layer_idx = utils.find_layer_idx(model, layer_name)
    filter_ids = np.arange(get_num_filters(model.layers[layer_idx]))[:num]

    # One generated input image per selected filter.
    vis_images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=fid,
                             input_range=(0., 1.))
        for fid in tqdm_notebook(filter_ids, 'Generating images')
    ]

    # Generate stitched image palette with 8 cols.
    stitched = utils.stitch_images(vis_images, cols=8)
    plt.figure(figsize=(16, 16))
    plt.axis('off')
    plt.imshow(stitched)
    plt.title(layer_name)
Example #13
0
    def plot_conv_weights(self):
        """For every conv layer of self.model, stitch activation-maximisation
        images of up to 64 filters and save them as heatmaps/<layer>.jpg
        next to this module."""
        PATH = os.path.dirname(__file__)
        conv_layers = _get_conv_layers(self.model)
        total = len(conv_layers)
        for position, layer in enumerate(conv_layers):
            layer_name = layer.name
            print("{}/{} : {}".format(position, total, layer_name))
            layer_idx = utils.find_layer_idx(self.model, layer_name)

            # Cap at the first 64 filters of the layer.
            filter_ids = np.arange(
                get_num_filters(self.model.layers[layer_idx]))[:64]
            rendered = []
            for fid in filter_ids:
                img = visualize_activation(self.model, layer_idx,
                                           filter_indices=fid)
                # Utility to overlay text on image.
                img = utils.draw_text(img, 'Filter {}'.format(fid))
                rendered.append(img)
            # Generate stitched image palette with 8 cols.
            stitched = utils.stitch_images(rendered, cols=8)
            plt.axis('off')
            plt.imsave(join(PATH, 'heatmaps/' + layer_name + '.jpg'), stitched)
Example #14
0
def maxactivation_montage(model,
                          layer_name,
                          nx=None,
                          ny=None,
                          dx=4,
                          dy=4,
                          size=64):
    """Build a grayscale montage of activation-maximisation images, one
    size x size tile per filter of *layer_name*, arranged nx x ny with
    dx/dy pixel gaps (grid shape auto-chosen when nx is None)."""
    layer_idx = get_layer_idx(model, layer_name)
    n_filters = get_num_filters(model.layers[layer_idx])

    # One uint8 size x size tile per filter.
    stack = np.empty((size, size, n_filters), dtype='u1')
    for filt in range(n_filters):
        rendered = visualize_activation(model, layer_idx, filter_indices=filt)
        rendered = rendered.reshape(rendered.shape[:2])
        stack[:, :, filt] = resize_image_array(rendered, size, size)

    if nx is None:
        nx, ny = find_good_shape(n_filters)

    tiled = tile_images(stack, nx, ny, dx, dy, fill=0)
    return Image.fromarray(tiled, mode='L')
Example #15
0
import keras
import numpy as np
import cv2
import sys
import os
# Bug fix: `plt` and `activations` were used below without being imported,
# raising NameError at runtime.
import matplotlib.pyplot as plt
from keras import activations
from vis.utils import utils
from vis.visualization import get_num_filters

# Make sure the output directory for single-neuron images exists.
if not os.path.exists("single_neuron/block1_conv1"):
    os.makedirs("single_neuron/block1_conv1")

model = keras.applications.VGG16()

layer_idx = utils.find_layer_idx(model, 'block1_conv1')

num_filters = get_num_filters(model.layers[layer_idx])

print(model.layers[layer_idx].get_weights()[0].shape)

# Scale the first conv layer's kernel weights into [0, 255] for display:
# shift so the minimum maps to 0, then scale so the maximum maps to 255.
max_v = np.amax(model.layers[layer_idx].get_weights()[0])
min_v = np.amin(model.layers[layer_idx].get_weights()[0])

print(max_v)
print(min_v)

pesos = model.layers[layer_idx].get_weights()[0].copy()
if min_v < 0:
    pesos = pesos + abs(min_v)
    max_v = max_v + abs(min_v)

pesos = pesos * (255.0 / max_v)

plt.rcParams['figure.figsize'] = (50, 50)

# The name of the layer we want to visualize
# You can see this in the model definition.
layer_name = 'preds'
layer_idx = utils.find_layer_idx(model, layer_name)
# Swap softmax with linear so activation maximisation has useful gradients.
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# Visualize all filters in this layer.
filters = np.arange(get_num_filters(model.layers[layer_idx]))
Example #17
0
 def get_num_of_channels(self, name):
     """Return the number of filters in the layer of self.model called *name*."""
     layer_position = utils.find_layer_idx(self.model, name)
     return get_num_filters(self.model.layers[layer_position])
Example #18
0
File: visual.py Project: Daiver/jff
    #modelName = "/home/daiver/coding/jff/py/keras_shape_reg/checkpoints/2017-05-19 12:47:16.562989_ep_149_train_l_0.00872_test_l_0.02065.h5"
    modelName = "/home/daiver/coding/jff/py/keras_shape_reg/checkpoints/2017-05-18 16:43:27.041387_ep_9499_train_l_0.00009_test_l_0.00706.h5"
    model = keras.models.load_model(modelName)
    print model.layers[0].name#conv1
    print model.layers[3].name#conv2
    print model.layers[5].name#conv3
    print model.layers[8].name#conv4
    print model.layers[10].name#conv5
    print model.layers[13].name#conv6
    print model.layers[15].name#conv7
    print model.layers[18].name#conv8
    print model.layers[20].name#conv8
    print model.layers[25].name#conv9
    #exit()
    
    layer_idx = 25
    num_filters = get_num_filters(model.layers[layer_idx])
    print num_filters
    filters = np.arange(get_num_filters(model.layers[layer_idx]))[:32]
    vis_images = []
    for idx in filters:
	img = visualize_activation(model, layer_idx, filter_indices=idx) 
	img = utils.draw_text(img, str(idx))
	vis_images.append(img)

    # Generate stitched image palette with 8 cols.
    stitched = utils.stitch_images(vis_images, cols=8)    
    plt.axis('off')
    plt.imshow(stitched)
    plt.show()
                    [40, 48, 52, 54, 81, 107, 224, 226],
                    [58, 79, 86, 216, 307, 426, 497, 509],
                    [2, 7, 41, 84, 103, 306, 461, 487]]

for layer_name in [
        'block1_conv2', 'block2_conv2', 'block3_conv3', 'block4_conv3',
        'block5_conv3'
]:
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Visualize all filters in this layer.
    if selected_filters:
        filters = selected_filters[i]
    else:
        filters = sorted(
            np.random.permutation(get_num_filters(
                model.layers[layer_idx]))[:max_filters])
    selected_indices.append(filters)

    # Generate input image for each filter.
    for idx in filters:
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=idx,
                                   tv_weight=0.,
                                   input_modifiers=[Jitter(0.05)],
                                   max_iter=300)
        vis_images[i].append(img)

    # Generate stitched image palette with 4 cols so we get 2 rows.
    stitched = utils.stitch_images(vis_images[i], cols=4)
    plt.figure(figsize=(20, 30))
Example #20
0
from vis.utils import utils
from vis.utils.vggnet import VGG16
#from vis.visualization import visualize_class_activation, get_num_filters
from vis.visualization import visualize_class_activation, get_num_filters
# Build the VGG16 network with ImageNet weights
#model = mout#VGG16(weights='imagenet', include_top=True)
# NOTE(review): `model` is only assigned in the commented line above, so this
# fragment assumes `model` (and also `np`, `plt`) are defined earlier in the
# file -- confirm before running it standalone.
print('Model loaded.')

# The name of the layer we want to visualize
# (see model definition in vggnet.py)
layer_name = 'vizblock'
# Hard-coded layer index; the commented expression shows the name-based lookup.
layer_idx = 4  #[idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

# Visualize all filters in this layer.
filters = np.arange(get_num_filters(model.layers[layer_idx]))

# Generate input image for each filter. Here `text` field is used to overlay `filter_value` on top of the image.
# NOTE(review): only the first 5 filter indices are actually rendered;
# `filters` computed above is unused below.
vis_images = []
for idx in range(5):
    img = visualize_class_activation(model, layer_idx, filter_indices=idx)
    #img = utils.draw_text(img, str(idx))
    vis_images.append(img)

# Generate stitched image palette with 8 cols.
stitched = utils.stitch_images(vis_images, cols=8)
plt.axis('off')
plt.imshow(stitched)
plt.title(layer_name)
plt.show()
Example #21
0
def vis(img):
    """Load the trained CNN weights (if the weight file exists) and save
    activation-maximisation visualisations for the selected layers, one
    <layer_name>.png per layer.

    NOTE(review): the `img` parameter is overwritten inside the loops before
    it is ever read -- presumably a vestigial argument; confirm with callers.
    """
    if os.path.isfile('model_{}_.h5'.format(model_version)):
        print 'loading model...'
        cnn = load_cnn_model()
        cnn.load_weights('model_{}_.h5'.format(model_version))

        # list all layers in loaded model.
        layer_name = "output_layer"
        layer_idx = [
            idx for idx, layer in enumerate(cnn.layers)
            if layer.name == layer_name
        ][0]

        # selected layers to visualise.
        layers = [
            'conv_layer_1', 'conv_layer_2', 'conv_layer_3', 'output_layer'
        ]

        # visualise convnet visualisation for each layer, place them in a subplot.
        for layer_name in layers:
            print "Generating visualisation of {}".format(layer_name)
            layer_idx = [
                idx for idx, layer in enumerate(cnn.layers)
                if layer.name == layer_name
            ][0]

            # Non-conv layer (here: the output layer) -- one image per
            # emotion class; the `break` below ends the whole layer loop.
            if 'conv' not in layer_name:
                plt.figure()
                for idx, e in enumerate(emotions):
                    plt.subplot(6, 6, idx + 1)
                    plt.text(1, 7, '{}'.format(e))
                    img = visualize_activation(cnn,
                                               layer_idx,
                                               filter_indices=idx,
                                               max_iter=750)
                    img = array_to_img(img.reshape(3, w, h))
                    plt.axis('off')
                    plt.imshow(img)

                plt.suptitle('Visualisation of the Output Layer')
                plt.savefig('{}.png'.format(layer_name), bbox_inches='tight')
                plt.show()
                break

            # Conv layer: one activation-maximisation image per filter.
            filters = np.arange(get_num_filters(cnn.layers[layer_idx]))

            images = []
            for idx in filters:
                img = visualize_activation(cnn,
                                           layer_idx,
                                           tv_weight=0,
                                           verbose=False,
                                           filter_indices=idx,
                                           max_iter=750)
                img = array_to_img(img.reshape(3, w, h))
                images.append(img)

            # Arrange the filter images on a 6x6 subplot grid.
            plt.figure()
            for idx, i in enumerate(images):
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.subplot(6, 6, idx + 1)
                plt.text(0, 15, 'Filter {}'.format(idx))
                plt.axis('off')
                plt.imshow(i)

            plt.suptitle('Visualisation of Convolution Layer {}'.format(
                layer_name[len(layer_name) - 1]))
            plt.savefig('{}.png'.format(layer_name), bbox_inches='tight')
            plt.show()

    else:
        print 'model does not exist, train the network first.'