Example 1
def visualize_multiple_categories(show=True):
    """Example to show how to visualize images that activate multiple categories
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D)
    indices = [20, [20, 71]]
    images = []
    for idx in indices:
        img = visualize_class_activation(model,
                                         layer_idx,
                                         filter_indices=idx,
                                         max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Multiple category visualization')
        plt.show()
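Each snippet on this page is a single function lifted from a larger script, so the imports are omitted. As a rough sketch, assuming the legacy keras-vis package layout shown in Example 19, a header that would make Example 1 runnable might look like the following (Example 1 calls visualize_class_activation while most later snippets call visualize_activation, so the exact import depends on the keras-vis version in use):

import matplotlib.pyplot as plt
from vis.utils import utils                      # draw_text, stitch_images, get_imagenet_label
from vis.utils.vggnet import VGG16               # ImageNet-trained VGG16 used throughout these examples
from vis.visualization import visualize_class_activation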
Example 2
def generate_cam(show=True):
    """Generates a heatmap via grad-CAM method.
    First, the class prediction is determined, then we generate heatmap to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in [
            'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Tigerwater_edit2.jpg/170px-Tigerwater_edit2.jpg'
    ]:
        seed_img = utils.load_img(path, target_size=(224, 224))

        # Convert to BGR, create input with batch_size: 1, and predict.
        bgr_img = utils.bgr2rgb(seed_img)
        img_input = np.expand_dims(img_to_array(bgr_img), axis=0)
        pred_class = np.argmax(model.predict(img_input))

        heatmap = visualize_cam(model, layer_idx, [pred_class], seed_img)
        if show:
            plt.axis('off')
            plt.imshow(heatmap)
            plt.title('Attention - {}'.format(
                utils.get_imagenet_label(pred_class)))
            plt.show()
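Depending on the keras-vis version, the heatmap returned above may already be overlaid on the seed image; if it comes back as a bare jet-colored map with the same shape as seed_img, it can be blended manually. A minimal sketch, assuming heatmap and seed_img are uint8 RGB arrays of identical shape:

import numpy as np

def blend(seed_img, heatmap, alpha=0.5):
    # Simple alpha blend of the grad-CAM heatmap onto the input image.
    return (alpha * heatmap + (1 - alpha) * np.asarray(seed_img)).astype('uint8')

plt.imshow(blend(seed_img, heatmap))
plt.axis('off')
plt.show()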
Example 3
def visualize_multiple_categories():
    """Example to show how to visualize images that activate multiple categories
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D)
    indices = [20, [20, 71]]
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]
    cv2.imshow('Multiple category visualization', utils.stitch_images(images))
    cv2.waitKey(0)
Example 4
def generate_saliency_map(show=True):
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing the filter output. First, the class prediction is determined, then we generate
    a heatmap to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in ['../resources/ouzel.jpg', '../resources/ouzel_1.jpg']:
        seed_img = utils.load_img(path, target_size=(224, 224))
        pred_class = np.argmax(
            model.predict(np.array([img_to_array(seed_img)])))
        heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img)

        if show:
            cv2.imshow(
                'Saliency - {}'.format(utils.get_imagenet_label(pred_class)),
                heatmap)
            cv2.waitKey(0)
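cv2.imshow requires a GUI session; on a headless machine the same heatmap can be written to disk instead. A small variation on the display block above (note that cv2.imwrite interprets the array as BGR, so channels may look swapped if the heatmap is RGB):

# Headless alternative to the cv2.imshow block: save to disk instead of displaying.
cv2.imwrite('saliency_{}.png'.format(pred_class), heatmap)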
Example 5
def generate_saliency_map(show=True):
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing the filter output. First, the class prediction is determined, then we generate
    a heatmap to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in ['../resources/ouzel.jpg', '../resources/ouzel_1.jpg']:
        seed_img = utils.load_img(path, target_size=(224, 224))

        # Convert to BGR, create input with batch_size: 1, and predict.
        bgr_img = utils.bgr2rgb(seed_img)
        img_input = np.expand_dims(img_to_array(bgr_img), axis=0)
        pred_class = np.argmax(model.predict(img_input))

        heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img)
        if show:
            plt.axis('off')
            plt.imshow(heatmap)
            plt.title('Saliency - {}'.format(
                utils.get_imagenet_label(pred_class)))
            plt.show()
Example 6
def generate_cam(show=True):
    """Generates a heatmap via grad-CAM method.
    First, the class prediction is determined, then we generate heatmap to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    for path in [
            'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Tigerwater_edit2.jpg/170px-Tigerwater_edit2.jpg'
    ]:
        seed_img = utils.load_img(path, target_size=(224, 224))
        pred_class = np.argmax(
            model.predict(np.array([img_to_array(seed_img)])))
        heatmap = visualize_cam(model, layer_idx, [pred_class], seed_img)

        if show:
            cv2.imshow(
                'Attention - {}'.format(utils.get_imagenet_label(pred_class)),
                heatmap)
            cv2.waitKey(0)
Example 7
def visualize_multiple_same_filter(num_runs=3, show=True):
    """Example to show how to visualize same filter multiple times via different runs.

    Args:
        num_runs: The number of times the same filter is visualized
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # 20 is the imagenet category for 'ouzel'
    indices = [20] * num_runs
    images = []
    for idx in indices:
        img = visualize_activation(model, layer_idx, filter_indices=idx, max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Multiple runs of ouzel')
        plt.show()
Example 8
def visualize_random(num_categories=10, show=True):
    """Example to show how to visualize multiple filters via activation maximization.

    Args:
        num_categories: The number of random categories to visualize. (Default value = 10)
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # Visualize a few random categories from ImageNet.
    indices = np.random.permutation(1000)[:num_categories]
    images = []
    for idx in indices:
        img = visualize_activation(model, layer_idx, filter_indices=idx, max_iter=500)
        img = utils.draw_text(img, utils.get_imagenet_label(idx))
        images.append(img)

    # Easily stitch images via `utils.stitch_images`
    stitched = utils.stitch_images(images)
    if show:
        plt.axis('off')
        plt.imshow(stitched)
        plt.title('Random imagenet categories')
        plt.show()
Example 9
def visualize_random(num_categories=10):
    """Example to show how to visualize multiple filters via activation maximization.

    Args:
        num_categories: The number of random categories to visualize. (Default value = 10)
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # Visualize a few random categories from ImageNet.
    indices = np.random.permutation(1000)[:num_categories]
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]

    # Easily stitch images via `utils.stitch_images`
    cv2.imshow('Random imagenet categories', utils.stitch_images(images))
    cv2.waitKey(0)
Example 10
def visualize_multiple_same_filter(num_runs=3):
    """Example to show how to visualize same filter multiple times via different runs.

    Args:
        num_runs: The number of times the same filter is visualized
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [
        idx for idx, layer in enumerate(model.layers)
        if layer.name == layer_name
    ][0]

    # 20 is the imagenet category for 'ouzel'
    indices = [20] * num_runs
    images = [
        visualize_activation(model,
                             layer_idx,
                             filter_indices=idx,
                             text=utils.get_imagenet_label(idx),
                             max_iter=500) for idx in indices
    ]
    cv2.imshow('Multiple runs of ouzel', utils.stitch_images(images))
    cv2.waitKey(0)
Example 11
def generate_cam_from_image(image, model=None, returnAsImage=True, layer=None):
    """Generates a heatmap via grad-CAM method.
    First, the class prediction is determined, then we generate heatmap to visualize that class.
    """
    # Build the VGG16 network with ImageNet weights
    if model is None:
        model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    if layer is None:
        layer_name = 'predictions'
        layer_idx = [
            idx for idx, layer in enumerate(model.layers)
            if layer.name == layer_name
        ][0]
    else:
        layer_idx = layer
    #seed_img = Image.open(image)
    seed_img = Image.fromarray(image)

    seed_img = seed_img.resize((configs.img_width, configs.img_height))

    # Convert to BGR, create input with batch_size: 1, and predict.
    bgr_img = utils.bgr2rgb(np.asarray(seed_img))
    img_input = np.expand_dims(bgr_img, axis=0)
    pred_class = np.argmax(model.predict(img_input))

    heatmap = visualize_cam(model, layer_idx, [pred_class],
                            np.asarray(seed_img))
    if returnAsImage:
        return Image.fromarray(heatmap)
    else:
        return heatmap
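Because generate_cam_from_image takes a NumPy array and an optional pre-built model, it can be reused in a loop without reloading VGG16 for every image. A usage sketch; frames is a hypothetical iterable of RGB arrays, and configs.img_width/configs.img_height are assumed to be 224 as in the other examples:

model = VGG16(weights='imagenet', include_top=True)
cams = [generate_cam_from_image(frame, model=model) for frame in frames]  # list of PIL Images
cams[0].save('cam_0.png')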
Example 12
def generate_opt_gif():
    """Example to show how to generate the gif of optimization progress.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]
    output_class = [20]

    optimizer_params = {
        'max_iter': 500,
        'verbose': True,
        'callbacks': [GifGenerator('opt_progress')]
    }
    visualize_class_activation(model, layer_idx, filter_indices=output_class, **optimizer_params)
Example 13
def visualize_multiple_same_filter():
    """Example to show how to visualize same filter multiple times via different runs.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    # 20 is the imagenet category for 'ouzel'
    indices = [20, 20, 20]
    idx_label_map = dict((idx, utils.get_imagenet_label(idx)) for idx in indices)

    vis_img = visualize_activation(model.input, layer_dict[layer_name], max_iter=500,
                                   filter_indices=indices, idx_label_map=idx_label_map)
    cv2.imshow('Multiple runs of ouzel', vis_img)
    cv2.waitKey(0)
Example 14
def visualize_random():
    """Example to show how to visualize multiple filters via activation maximization
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    # Visualize couple random categories from imagenet.
    indices = np.random.permutation(1000)[:15]
    idx_label_map = dict((idx, utils.get_imagenet_label(idx)) for idx in indices)

    vis_img = visualize_activation(model.input, layer_dict[layer_name], max_iter=500,
                                   filter_indices=indices, idx_label_map=idx_label_map)
    cv2.imshow('Random imagenet output categories', vis_img)
    cv2.waitKey(0)
Example 15
def visualize_multiple_categories():
    """Example to show how to visualize images that activate multiple categories
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    # Visualize [20] (ouzel) and [20, 71] (An ouzel-scorpion :D)
    indices = [20, [20, 71]]
    idx_label_map = dict((idx, utils.get_imagenet_label(idx)) for idx in [20, 71])

    vis_img = visualize_activation(model.input, layer_dict[layer_name], max_iter=500,
                                   filter_indices=indices, idx_label_map=idx_label_map)
    cv2.imshow('Multiple category visualization', vis_img)
    cv2.waitKey(0)
Example 16
def main():
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing the filter output.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    for path in ['../resources/ouzel.jpg', '../resources/ouzel_1.jpg']:
        seed_img = utils.load_img(path, target_size=(224, 224))
        # 20 is the imagenet category for 'ouzel'
        heatmap = visualize_saliency(model.input, layer_dict[layer_name], [20],
                                     seed_img)
        cv2.imshow('Importance map', heatmap)
        cv2.waitKey(0)
Example 17
def generate_opt_gif():
    """Example to show how to generate the gif of optimization progress.
    This example also shows how to use the optimizer directly with losses.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    output_class = [20]

    losses = [(ActivationMaximization(layer_dict[layer_name],
                                      output_class), 2),
              (LPNorm(model.input), 10), (TotalVariation(model.input), 10)]
    opt = Optimizer(model.input, losses)
    opt.minimize(max_iter=500,
                 verbose=True,
                 callbacks=[GifGenerator('opt_progress')])
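The second element of each (loss, weight) tuple controls how strongly that term pulls on the generated image: ActivationMaximization drives it toward the target class, while LPNorm and TotalVariation are regularizers that keep it smooth and bounded. Lowering the regularizer weights yields a noisier but more strongly class-activating image; an illustrative variant of the same optimizer setup (these weights are assumptions, not from the source):

losses = [(ActivationMaximization(layer_dict[layer_name], output_class), 2),
          (LPNorm(model.input), 1),
          (TotalVariation(model.input), 1)]
opt = Optimizer(model.input, losses)
opt.minimize(max_iter=500, verbose=True,
             callbacks=[GifGenerator('opt_progress_low_reg')])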
Example 18
def generate_opt_gif():
    """Example to show how to generate the gif of optimization progress.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    output_class = [20]

    losses = [(ActivationMaximization(layer_dict[layer_name],
                                      output_class), 1), (LPNorm(), 10),
              (TotalVariation(), 1)]
    opt = Optimizer(model.input, losses)

    # Jitter is used as a regularizer to create crisper images, but it makes gif animation ugly.
    opt.minimize(max_iter=500,
                 verbose=True,
                 jitter=0,
                 progress_gif_path='opt_progress')
Example 19
from keras.preprocessing.image import img_to_array
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
from vis.utils import utils
from vis.utils.vggnet import VGG16
from vis.utils.inception_v3 import InceptionV3, conv2d_bn
from vis.visualization import visualize_saliency, visualize_cam, visualize_activation, get_num_filters

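# Inception-style preprocessing: rescales pixel values to [-1, 1]. Note that this
# definition shadows the preprocess_input imported from keras.applications.imagenet_utils above.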
def preprocess_input(x):
    x /= 255.
    x -= 0.5
    x *= 2.
    return x

#  Build the VGG16 network with ImageNet weights
model_vgg = VGG16(weights='imagenet', include_top=True)
print('Model loaded.')
model = model_vgg

# Build the InceptionV3 network with ImageNet weights
# model = InceptionV3(weights='imagenet', include_top=True)
# print('Model loaded.')

# The name of the layer we want to visualize
# (see model definition in vggnet.py or inception_v3.py)
layer_name = 'predictions'
layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

# Images corresponding to tiger, penguin, dumbbell, speedboat, spider
image_paths = [
    "http://www.tigerfdn.com/wp-content/uploads/2016/05/How-Much-Does-A-Tiger-Weigh.jpg",