Example #1
def ShowModelActivationMaximization(model, layer_name):
    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    # This is the output node we want to maximize.
    filter_idx = 0
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=filter_idx,
                               input_range=(0., 1.))
    plt.imshow(img[..., 0])
    file_name = "tmp/ActivationMax/" + layer_name + ".jpg"
    plt.imsave(file_name, img[..., 0])
    # plt.show()

    classes_num = model.output_shape[1]
    for output_idx in np.arange(classes_num):
        # Let's turn off verbose output this time to avoid clutter.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        img_name = "tmp/ActivationMax/%s_%s.jpg" % (
            layer_name, TrainingDefines.ACTION_NAME[output_idx])
        plt.imsave(img_name, img[..., 0])

    return
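For context, a minimal sketch of how this helper might be called. The saved-model path and the 'preds' layer name are placeholders, and the names the snippet itself relies on (np, plt, utils, activations, TrainingDefines) are assumed to be in scope at module level:

import os
from keras.models import load_model

# Hypothetical usage: 'model.h5' and the layer name 'preds' are placeholders.
os.makedirs('tmp/ActivationMax', exist_ok=True)  # the helper writes images here
model = load_model('model.h5')
ShowModelActivationMaximization(model, 'preds')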
Example #2
def visualize(model):
    from vis.visualization import visualize_activation
    from vis.utils import utils
    from keras import activations

    visualization_path = Path("visualization/auto-tagger/")

    if not visualization_path.exists():
        visualization_path.mkdir(parents=True)

    # visualize each class
    for t in range(len(tags)):
        save_path = visualization_path / "out_{}.png".format(tags[t])
        if save_path.exists():
            print("{} already visualized".format(tags[t]))
            continue

        layer_idx = utils.find_layer_idx(model, "dense_2")

        # Swap softmax with linear
        model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(model)

        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=t,
                                   verbose=True)
        array_to_img(img).save(save_path)
        print("saved to {}".format(str(save_path)))

    # visualize each layer
    for layer_name in [layer.name for layer in model.layers]:
        save_path = visualization_path / "{}.png".format(layer_name)
        if save_path.exists():
            print("{} already visualized".format(layer_name))
            continue
        if any([x in layer_name for x in ["batch_normalization", "input"]]):
            print("skipping visualization of {}".format(layer_name))
            continue

        # Utility to search for layer index by name.
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        layer_idx = utils.find_layer_idx(model, layer_name)

        # Swap softmax with linear
        model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(model)

        img = visualize_activation(model, layer_idx, verbose=True)
        array_to_img(img).save(save_path)
        print("saved to {}".format(str(save_path)))
Example #3
def cnnfeature_vis(model):
    from vis.visualization import visualize_activation
    from vis.utils import utils
    from keras import activations

    from matplotlib import pyplot as plt
    # %matplotlib inline
    plt.rcParams['figure.figsize'] = (18, 6)

    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, 'preds')

    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    # This is the output node we want to maximize.
    filter_idx = 0
    img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
    plt.imshow(img[..., 0])

    for output_idx in np.arange(10):
        # Let's turn off verbose output this time to avoid clutter.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        plt.figure()
        plt.title("Network's perception of {}".format(output_idx))
        plt.imshow(img[..., 0])

    # Now visualize without the softmax-to-linear swap:
    # restore the softmax activation first.
    model.layers[layer_idx].activation = activations.softmax
    model = utils.apply_modifications(model)

    for output_idx in np.arange(10):
        # Let's turn off verbose output again; this time softmax is left in place.
        img = visualize_activation(model,
                                   layer_idx,
                                   filter_indices=output_idx,
                                   input_range=(0., 1.))
        plt.figure()
        plt.title("Network's perception of {}".format(output_idx))
        plt.imshow(img[..., 0])
        plt.show()
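Why these examples keep swapping softmax for linear: the softmax output for one class can be raised simply by suppressing the other classes, so maximizing it directly tends to produce less interpretable images. A minimal sketch of the recurring pattern, assuming a trained model whose final layer is named 'preds' (an assumed name):

from keras import activations
from vis.utils import utils

layer_idx = utils.find_layer_idx(model, 'preds')  # 'preds' is an assumed layer name
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)  # rebuilds the graph so the swap takes effect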
Example #4
def visualize_multiple_categories(show=True):
    """Example to show how to visualize images that activate multiple categories
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D)
    indices = [20, [20, 71]]
    images = []
    for idx in indices:
        img = visualize_activation(model, layer_idx, filter_indices=idx, max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Multiple category visualization')
        plt.show()
Example #5
def visualize_multiple_same_filter(num_runs=3, show=True):
    """Example to show how to visualize same filter multiple times via different runs.

    Args:
        num_runs: The number of times the same filter is visualized
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # 20 is the imagenet category for 'ouzel'
    indices = [20] * num_runs
    images = []
    for idx in indices:
        img = visualize_activation(model, layer_idx, filter_indices=idx, max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Multiple runs of ouzel')
        plt.show()
Example #6
def layer_visualisation(model, layer, start, end):
    layer_idx = utils.find_layer_idx(model, layer)
    filters = np.random.permutation(get_num_filters(
        model.layers[layer_idx]))[:10]
    nbFilters = len(filters)
    # create a folder in static/img/ for every layer
    path = "static/img/" + layer + '/'
    if not os.path.isdir(path):
        os.makedirs(path)
    for index in range(start, end + 1):
        if index < nbFilters:
            namePath = img_name(layer, index)
            if not os.path.exists(namePath):
                layer_idx = utils.find_layer_idx(model, layer)  #layer
                plt.rcParams['figure.figsize'] = (18, 6)
                img = visualize_activation(model,
                                           layer_idx,
                                           filter_indices=index)  #filter_nb
                im = Image.fromarray(img)
                d = ImageDraw.Draw(im)
                d.text((10, 10),
                       layer + " Filter: " + str(index),
                       fill=(255, 255, 0))
                print(layer + " Filter: " + str(index))
                im.save(namePath)
                print('created ' + namePath)
            else:
                print(namePath + ' already exists: ' +
                      str(os.path.exists(namePath)))
Example #7
def save_img(path, savepath, origimg, typeimg, layeridx):

    img = load_img(path, target_size=(224,224))
    x = img_to_array(img)  # numpy array
    x = x.reshape((1,) + x.shape)  # add a batch dimension for Keras

    # NOTE: in keras-vis the swapped activation only takes effect after
    # utils.apply_modifications() rebuilds the model graph.
    model.layers[layeridx].activation = activations.linear
    if typeimg == 'activation':
        img = visualize_activation(model, layeridx, 20, x)

    if typeimg == 'saliency':
        img = visualize_saliency(model, layeridx, 1, x)

    if typeimg == 'cam':
        img = visualize_cam(model, layeridx, 1, x)

    if not os.path.exists('layer-' + savepath):
        os.makedirs('layer-' + savepath)

    if not os.path.exists('image-' + savepath):
        os.makedirs('image-' + savepath)

    combined = str(savepath) + '/' + str(origimg)
    plt.imshow(img)
    plt.savefig('layer-' + combined, dpi=600)
Example #8
def plot_classes(model, number_sequence):
    """

    Creates a keras model visualization of its learning state at a certain epoch depending on the model trained.

    Arguments:
        model (Keras Model Object): The neural network of the keras CNN.
        number_sequence (String): Number sequence primary name.


    """

    # Numbers to visualize
    numbers_to_visualize = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    images_learned = []

    # Visualize
    for number_to_visualize in numbers_to_visualize:
        visualization = visualize_activation(
            model, layer_index, filter_indices=number_to_visualize)
        plt.imshow(visualization[..., 0])
        plt.title(f'MNIST target = {number_to_visualize}')
        plt.savefig(number_sequence + str(number_to_visualize) + ".png")
        images_learned.append(wandb.Image(plt))

    wandb.log({"Learning Visualization for " + number_sequence: images_learned})
Example #9
def visualize_random(num_categories=10, show=True):
    """Example to show how to visualize multiple filters via activation maximization.

    Args:
        num_categories: The number of random categories to visualize. (default: 10)
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # Visualize couple random categories from imagenet.
    indices = np.random.permutation(1000)[:num_categories]
    images = []
    for idx in indices:
        img = visualize_activation(model, layer_idx, filter_indices=idx, max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Random imagenet categories')
        plt.show()
Example #10
def main():
    model = load_model('model2.h5py')
    model.summary()
    # swap softmax activation function to linear
    layer_idx = -1
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)


    layer_names = ['leaky_re_lu_1', 'leaky_re_lu_2', 'leaky_re_lu_3', 'leaky_re_lu_4']
    for lid in range(len(layer_names)):
        layer_name = layer_names[lid]
        layer_idx = utils.find_layer_idx(model, layer_name)
        filters = np.arange(get_num_filters(model.layers[layer_idx]))
        filters = _shuffle(filters)
        vis_images = []
        for idx in range(16):
            indices = filters[idx]
            img = visualize_activation(model, layer_idx, filter_indices=indices,
                                       tv_weight=0., input_modifiers=[Jitter(0.5)])
            vis_images.append(img)
        #img = img.reshape((48, 48))
        #plt.imshow(img, cmap="Blues")
        #plt.show()

        stitched = utils.stitch_images(vis_images, cols=8)
        plt.figure()
        plt.axis('off')
        shape = stitched.shape
        stitched = stitched.reshape((shape[0], shape[1]))
        plt.imshow(stitched)
        plt.title(layer_name)
        plt.tight_layout()
        plt.savefig('Filter_{}.png'.format(lid))
Example #11
def prediction_with_important(brain_net, graph_adjacency, n_nodes=84, kth=100):

    # make prediction for a single graph
    pred_bn, confidences = brain_net.predict(graph_adjacency)

    print('Computing partial derivatives for each sample')
    print('This operation takes time...')

    # graph_adjacency = np.expand_dims(graph_adjacency, axis=0)

    # compute an attribution heatmap for each edge (cf. Simonyan et al., 2013);
    # note that visualize_activation performs activation maximization, whereas
    # visualize_saliency would yield the literal partial derivatives.
    heatmap = visualize_activation(brain_net.model, layer_idx=-1, filter_indices=0, seed_input=graph_adjacency)

    # compute the importance of each node by summing importance of edges
    important_nodes = np.sum(abs(heatmap), axis=0)

    # select the nodes belonging to the percentile-th percentile
    #most_important = np.argwhere(important_nodes >= np.percentile(important_nodes, percentile))
    
    # sorting for getting the first k% most important
    vip = [(i, value) for i, value in enumerate(important_nodes)]
    k_vip = sorted(vip, reverse=True, key=lambda x: x[1])[n_nodes - int((kth/100) * n_nodes):]
    print(k_vip)

    #k_vip = list(map(lambda x: x[0]+1, k_vip))

    print('done.')

    #return confidences, pred_bn, most_important
    return confidences, pred_bn, k_vip
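Since the comment cites Simonyan et al. (2013), note that in keras-vis the literal input gradients come from visualize_saliency rather than visualize_activation; a hedged alternative call with the same arguments:

from vis.visualization import visualize_saliency

# Gradient of the class-0 output w.r.t. the adjacency input (Simonyan et al., 2013).
heatmap = visualize_saliency(brain_net.model, layer_idx=-1,
                             filter_indices=0, seed_input=graph_adjacency)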
Example #12
def visualize_random(num_categories=10):
    """Example to show how to visualize multiple filters via activation maximization.

    Args:
        num_categories: The number of random categories to visualize. (default: 10)
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # Visualize couple random categories from imagenet.
    indices = np.random.permutation(1000)[:num_categories]
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]

    # Easily stitch images via `utils.stitch_images`
    cv2.imshow('Random imagenet categories', utils.stitch_images(images))
    cv2.waitKey(0)
Example #13
def main():
    # dataset_str = 'rgb'
    dataset_str = 'smooth'

    # load fine-tuned model
    # model = train_and_test([dataset])
    model = load_model('models/vgg16_hybrid_1365_softmax_mit67_' + dataset_str + '.h5')

    # use first dense layer
    # layer_idx = 19
    # first conv layer
    # layer_idx = 0

    # visualize all convolutional & fully connected layers
    layer_indices = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16, 19, 20, 21]

    nb_filters = 32

    # for each layer, iterate over nb_filters many filters
    for layer_idx in layer_indices:
        print('layer: ' + str(layer_idx))
        for filter_idx in range(nb_filters):
            print('filter: ' + str(filter_idx))
            act = visualize_activation(model, layer_idx, filter_idx).squeeze()

            # directory to save images to
            dir_name = 'mit67_' + dataset_str + '_activations/'

            imsave(dir_name + 'act_' + str(layer_idx) +
                   '_' + str(filter_idx) + '.png',
                   act)
Example #14
    def find_mai(self, layer_id, filter_id):
        img = visualize_activation(model=self.model_,
                                   layer_idx=layer_id,
                                   filter_indices=[filter_id],
                                   max_iter=500,
                                   verbose=False)
        return img
Example #15
    def visualize_activation(self):

        from matplotlib import pyplot as plt
        from vis.utils import utils
        from vis.visualization import visualize_activation, get_num_filters

        vis_images = []
        for i in range(self.config['output_shape'][0]):
            # The name of the layer we want to visualize
            layer_name = 'output_%d' % i
            layer_idx = [
                idx for idx, layer in enumerate(self.net.layers)
                if layer.name == layer_name
            ][0]

            print('Working on %s' % layer_name)
            # Generate the input image for filter index 1 three times;
            # separate optimization runs can converge to different images.
            for idx in [1, 1, 1]:
                img = visualize_activation(self.net,
                                           layer_idx,
                                           filter_indices=idx,
                                           max_iter=500)
                # img = utils.draw_text(img, TAGS[i])
                vis_images.append(img)

        # Generate stitched image palette with 8 cols.
        stitched = utils.stitch_images(vis_images, cols=3)
        plt.axis('off')
        plt.imshow(stitched)
        plt.title(self.checkpoint_name)
        plt.show()
Example #16
def prediction_with_important_edges(brain_net, graph_adjacency, n_nodes=84, kth=100):

    # make prediction for a single graph
    pred_bn, confidences = brain_net.predict(graph_adjacency)

    print('Computing partial derivatives for each sample')
    print('This operation takes time...')

    # graph_adjacency = np.expand_dims(graph_adjacency, axis=0)

    # compute an attribution heatmap for each edge (cf. Simonyan et al., 2013);
    # as above, visualize_saliency would yield the literal partial derivatives.
    heatmap = visualize_activation(brain_net.model, layer_idx=-1, filter_indices=0, seed_input=graph_adjacency)

    edges = list()

    # getting the triu of the matrix
    triu = np.triu(heatmap)
    n,m = triu.shape
    for i in range(n):
        for j in range(i+1, m):
            edges.append((i+1,j+1,triu[i][j]))

    edges = sorted(edges, key=lambda x: x[2], reverse=True)
    print('done.')
    
    return confidences, pred_bn, edges
Example #17
def maxout(model, layer, filters):
    # Swap the softmax activation for linear
    model.layers[layer].activation = activations.linear
    model = apply_modifications(model)

    act = visualize_activation(model, layer, filter_indices=filters,
                               tv_weight=1., lp_norm_weight=0.,
                               verbose=True, input_modifiers=[Jitter(16)])
    return act
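The tv_weight, lp_norm_weight and Jitter arguments are keras-vis regularizers that trade off natural-looking images against raw activation strength. A hedged usage sketch; the checkpoint path is a placeholder and the module-level imports of maxout are assumed:

import matplotlib.pyplot as plt
from keras.models import load_model

model = load_model('model.h5')               # hypothetical checkpoint
act = maxout(model, layer=-1, filters=[0])   # maximize class 0 at the output layer
plt.imshow(act)
plt.show()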
Example #18
def visualize_multiple_same_filter(num_runs=3):
    """Example to show how to visualize same filter multiple times via different runs.

    Args:
        num_runs: The number of times the same filter is visualized
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # 20 is the imagenet category for 'ouzel'
    indices = [20] * num_runs
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]
    cv2.imshow('Multiple runs of ouzel', utils.stitch_images(images))
    cv2.waitKey(0)
Example #19
    def visualize_dense_layer(self):
        self.logger.info('Visualizing dense layers')

        # create folder for saving visualization
        save_path = os.path.join(constants.MODEL_DIR, 'Visualization',
                                 self.model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # search the last dense layer with the name 'preds'
        layer_idx = utils.find_layer_idx(self.model, 'preds')

        # Swap softmax with linear
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)

        # output node we want to maximize
        for class_idx in np.arange(len(constants.CATEGORIES)):
            # Let's turn off verbose output this time to avoid clutter.
            img = visualize_activation(model,
                                       layer_idx,
                                       filter_indices=class_idx,
                                       input_range=(0., 1.))
            plt.figure()
            plt.title("Network's perception of {}".format(class_idx))
            plt.imshow(img[..., 0])

            # save the plot
            plot_name = 'dense-layer-{}.png'.format(
                constants.CATEGORIES[class_idx])
            plt.savefig(os.path.join(save_path, plot_name))
            plt.show()
Example #20
def visualize_multiple_categories():
    """Example to show how to visualize images that activate multiple categories
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D)
    indices = [20, [20, 71]]
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]
    cv2.imshow('Multiple category visualization', utils.stitch_images(images))
    cv2.waitKey(0)
Example #21
def vis_2():
    # Maximize the response of output node 6.
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=6,
                               max_iter=500,
                               verbose=True)
    save_image(img, '2.png')
Example #22
    def plot_activation(self):
        layer_idx = -1
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)
        for pred_class in range(25)[11:12]:
            print(self.inv_dico.get(pred_class))
            actmap = visualize_activation(model, layer_idx,
                                          filter_indices=pred_class)
            plt.imsave('img.jpg', actmap)
Example #23
def plt_activation(model_path, layer_idx=-1, max_iter=200, **kwargs):
    """
    Plot activation of a given layer in a model by generating an image that
    maximizes the output of all `filter_indices` in the given `layer_idx`.
    Args:
        model_path: Path to the model file.
        layer_idx: Index of the layer to plot.
        max_iter: Maximum number of iterations to generate the input image.
        kwargs: Unused arguments.

    Returns:
        None. The visualization is shown with matplotlib.
    """
    model = import_model(model_path)
    model = prediction_layer_linear_activation(model)
    if isinstance(model.layers[layer_idx], keras.layers.Dense):
        img = vvis.visualize_activation(model,
                                        layer_idx,
                                        max_iter=max_iter,
                                        filter_indices=None)
    elif isinstance(model.layers[layer_idx], keras.layers.Conv2D):
        filters = np.arange(vvis.get_num_filters(model.layers[layer_idx]))

        # Generate input image for each filter.
        vis_images = []
        for idx in tqdm.tqdm(filters):
            act_img = vvis.visualize_activation(model,
                                                layer_idx,
                                                max_iter=max_iter,
                                                filter_indices=idx)

            vis_images.append(act_img)

        # Generate stitched image palette with 8 cols.
        img = vutils.stitch_images(vis_images, cols=8)
    else:
        raise TypeError("Invalid Layer type. model.layers[{}] is {}, "
                        "only Dense and Conv2D layers can be used".format(
                            str(layer_idx),
                            str(type(model.layers[layer_idx]))))
    plt.axis("off")
    if len(img.shape) == 2 or img.shape[2] == 1:
        plt.imshow(img.reshape(img.shape[0:2]), cmap="gray")
    else:
        plt.imshow(img)
    plt.show()
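A hedged usage sketch for this helper; the model path and layer index are placeholders, and import_model / prediction_layer_linear_activation are project helpers assumed to be importable:

# Hypothetical call: stitches activation-maximization images for every filter
# if the indexed layer is Conv2D, or a single image if it is Dense.
plt_activation('model.h5', layer_idx=-4, max_iter=200)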
Example #24
def getModelActivation(model, classIdx):
    layer_idx = -1
    act = visualize_activation(model,
                               layer_idx,
                               filter_indices=classIdx,
                               seed_input=None,
                               input_range=(0, 255),
                               backprop_modifier='guided')
    return act
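For context, a minimal hedged sketch of calling this helper on an ImageNet classifier; the class index is illustrative:

import matplotlib.pyplot as plt
from keras.applications import VGG16

model = VGG16(weights='imagenet', include_top=True)
act = getModelActivation(model, classIdx=20)  # 20 is 'ouzel' in ImageNet
plt.imshow(act)
plt.show()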
Example #25
def vis_3():
    # Maximize the response of output node 36.
    # Jitter the input by 16 pixels along all dimensions during optimization.
    img = visualize_activation(model,
                               layer_idx,
                               filter_indices=36,
                               max_iter=2000,
                               input_modifiers=[Jitter(16)])
    save_image(img, 'filter_single_6_' + str(37) + '.png')
Example #26
    def show_layer_activation(self, layer_name, filter_indices):
        layer_idx = vis_utils.find_layer_idx(self.model, layer_name)

        self.model.layers[layer_idx].activation = activations.linear
        vis_model = vis_utils.apply_modifications(self.model)

        img = visualize_activation(vis_model,
                                   layer_idx,
                                   filter_indices=filter_indices)
        show_img_array(img)
Example #27
def visualize_attention_on_image(raw_image,
                                 normalised_image,
                                 model,
                                 layer_index,
                                 filter_indices,
                                 type="saliency"):
    """
    Shows the saliency map of an image, overlayed over the input image

    ATTENTION: seems so to be influenced by the learning phase,
    learning phase has to be 1(for learning)

    :param image:
    :param model:
    :param layer_index:
    :param filter_indices:
    :return: Image with overlayed saliency map
    """
    titles = ['left steering', 'right steering']
    modifiers = [None, 'negate']

    for i, modifier in enumerate(modifiers):

        if type == "saliency":
            heatmap = visualize_saliency(model,
                                         layer_idx=layer_index,
                                         filter_indices=filter_indices,
                                         seed_input=normalised_image,
                                         grad_modifier=modifier,
                                         backprop_modifier='guided')
        elif type == "cam":
            heatmap = visualize_cam(model,
                                    layer_idx=layer_index,
                                    filter_indices=filter_indices,
                                    seed_input=normalised_image,
                                    grad_modifier=modifier)
        elif type == "activation":
            heatmap = visualize_activation(model,
                                           layer_idx=layer_index,
                                           filter_indices=filter_indices,
                                           seed_input=normalised_image,
                                           input_range=(0, 1),
                                           grad_modifier=modifier)

        else:
            print("Select 'saliency' or 'cam' as visualization type!")
            break

        #plt.figure()
        #plt.title(titles[i])

        #plt.imshow(heatmap)

        overlay_colour_on_greyscale(heatmap, raw_image, titles[i])
Example #28
def show_activation(model, layer_idx):
    from vis.visualization import visualize_activation
    from vis.input_modifiers import Jitter
    # In this model class index 1 is 'PNEUMONIA'; filter_indices=None maximizes all outputs.
    im = visualize_activation(model,
                              layer_idx,
                              filter_indices=None,
                              max_iter=500,
                              input_modifiers=[Jitter(16)],
                              verbose=False)
    plt.imshow(im)
    plt.show()
Example #29
def vis_max(model, save_path):
    '''
        Generate the input image that maximizes the response of each class in the final FC layer
        Please also see keras-vis for details.
    '''
    layer_idx = vutils.find_layer_idx(model, 'predictions')
    model.layers[layer_idx].activation = activations.linear
    model = vutils.apply_modifications(model)
    for ind in range(len(config.classes)):
        print('Generating for class {}'.format(ind))
        plt.rcParams['figure.figsize'] = (18, 6)
        img = visualize_activation(model, layer_idx, filter_indices=ind)
        mpimg.imsave(save_path + str(ind) + ".png", img)
    print('Done')
Example #30
def visualize_grad(model_weights, data):
	# model_weights: trained model.
	# data: image preprocessed for Keras.

	# Note: give the last layer of every model (the layer that applies
	# softmax) the name 'predictions'; that is the layer we visualize,
	# which standardizes across architectures.

	#layer_idx = [idx for idx,layer in enumerate(model.layers) if layer.name == layer_name][0]
	layer_idx = -1

	#original = np.copy(recover(data))
	#cv2.imwrite(path.replace('/scripts/python', '') + '/heatmaps/' + 'original.png', original)

	img = np.reshape(data, (1,) + data.shape)
	prediction = predictData(model_weights, img)
	pred_class = np.argmax(prediction)
	print('image shape: ', img.shape)
	print('prediction: ', prediction)
	print('class: ', pred_class)

	#model_weights.layers[layer_idx].activation = activations.linear
	#model_weights = utils.apply_modifications(model_weights)

	heatmap_activation = visualize_activation(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
	#plt.rcParams['figure.figsize'] = (18, 6)
	heatmap_activation = np.squeeze(heatmap_activation, axis=2)
	#plt.imshow(heatmap_activation, cmap='jet')
	plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'activation.png', heatmap_activation, cmap='jet')
	#plt.show()
	#cv2.imwrite(path.replace('/scripts/python', '') + '/heatmaps/' + 'activation.png', heatmap_activation)

	heatmap_saliency = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
	#plt.rcParams['figure.figsize'] = (18, 6)
	#heatmap_saliency = np.squeeze(heatmap_saliency, axis=2)
	#plt.imshow(heatmap_saliency, cmap='jet')
	plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency.png', heatmap_saliency, cmap='jet')
	#plt.show()	
	#cv2.imwrite(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency.png', heatmap_saliency)

	heatmap_saliency_g = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img, backprop_modifier='guided')
	plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency_guided.png', heatmap_saliency_g, cmap='jet')

	heatmap_saliency_r = visualize_saliency(model_weights, layer_idx, filter_indices=pred_class, seed_input=img, backprop_modifier='relu')
	plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'saliency_relu.png', heatmap_saliency_r, cmap='jet')

	heatmap_cam = visualize_cam(model_weights, layer_idx, filter_indices=pred_class, seed_input=img)
	#plt.rcParams['figure.figsize'] = (18, 6)
	#heatmap_cam = np.squeeze(heatmap_cam, axis=2)
	#plt.imshow(heatmap_cam, cmap='jet')
	plt.imsave(path.replace('/scripts/python', '') + '/heatmaps/' + 'cam.png', heatmap_cam, cmap='jet')
Example #31
File: visual.py Project: Daiver/jff
    #modelName = "/home/daiver/coding/jff/py/keras_shape_reg/checkpoints/2017-05-19 12:47:16.562989_ep_149_train_l_0.00872_test_l_0.02065.h5"
    modelName = "/home/daiver/coding/jff/py/keras_shape_reg/checkpoints/2017-05-18 16:43:27.041387_ep_9499_train_l_0.00009_test_l_0.00706.h5"
    model = keras.models.load_model(modelName)
    print(model.layers[0].name)   # conv1
    print(model.layers[3].name)   # conv2
    print(model.layers[5].name)   # conv3
    print(model.layers[8].name)   # conv4
    print(model.layers[10].name)  # conv5
    print(model.layers[13].name)  # conv6
    print(model.layers[15].name)  # conv7
    print(model.layers[18].name)  # conv8
    print(model.layers[20].name)  # conv8
    print(model.layers[25].name)  # conv9
    #exit()
    
    layer_idx = 25
    num_filters = get_num_filters(model.layers[layer_idx])
    print(num_filters)
    filters = np.arange(get_num_filters(model.layers[layer_idx]))[:32]
    vis_images = []
    for idx in filters:
        img = visualize_activation(model, layer_idx, filter_indices=idx)
        img = utils.draw_text(img, str(idx))
        vis_images.append(img)

    # Generate stitched image palette with 8 cols.
    stitched = utils.stitch_images(vis_images, cols=8)    
    plt.axis('off')
    plt.imshow(stitched)
    plt.show()