Example #1
def visualize_activation_ternary_dynamic(model, layer_idx, alpha=1e-6, filter_indices=None, wrt_tensor=None,
                         seed_input=None, input_range=(-1, 1),
                         backprop_modifier=None, grad_modifier=None,
                         act_max_weight=1, lp_norm_weight=10, tv_weight=10,
                         **optimizer_params):
    """Generates the model input that maximizes the output of all `filter_indices` in the given `layer_idx`, and
    put it in ternary representation
    Args:
        model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
            if `image_data_format=channels_first` or `(samples, image_dims..., channels)` if
            `image_data_format=channels_last`.
        layer_idx: The layer index within `model.layers` whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            If None, all filters are visualized. (Default value = None)
            For `keras.layers.Dense` layer, `filter_idx` is interpreted as the output index.
            If you are visualizing final `keras.layers.Dense` layer, consider switching 'softmax' activation for
            'linear' using [utils.apply_modifications](vis.utils.utils#apply_modifications) for better results.
        wrt_tensor: Short for 'with respect to'. The gradients of losses are computed with respect to this tensor.
            When None, this is assumed to be the same as `input_tensor`. (Default value = None)
        seed_input: Seeds the optimization with a starting input. Initialized with a random value when set to None.
            (Default value = None)
        input_range: Specifies the input range as a `(min, max)` tuple. This is used to rescale the
            final optimized input to the given range. (Default value = (-1, 1))
        backprop_modifier: backprop modifier to use. See [backprop_modifiers](vis.backprop_modifiers.md). If you don't
            specify anything, no backprop modification is applied. (Default value = None)
        grad_modifier: gradient modifier to use. See [grad_modifiers](vis.grad_modifiers.md). If you don't
            specify anything, gradients are unchanged (Default value = None)
        act_max_weight: The weight param for `ActivationMaximization` loss. Not used if 0 or None. (Default value = 1)
        lp_norm_weight: The weight param for `LPNorm` regularization loss. Not used if 0 or None. (Default value = 10)
        tv_weight: The weight param for `TotalVariation` regularization loss. Not used if 0 or None. (Default value = 10)
        alpha: The weight param for the `EstimateTernaryInput` ternarization loss. (Default value = 1e-6)
        optimizer_params: The **kwargs for optimizer [params](vis.optimizer#optimizerminimize). Will default to
            reasonable values when required keys are not found.
    Example:
        If you wanted to visualize the input image that would maximize the output index 22, say on
        final `keras.layers.Dense` layer, then, `filter_indices = [22]`, `layer_idx = dense_layer_idx`.
        If `filter_indices = [22, 23]`, then it should generate an input image that shows features of both classes.
    Returns:
        The model input that maximizes the output of `filter_indices` in the given `layer_idx`.
    """
    if backprop_modifier is not None:
        modifier_fn = get(backprop_modifier)
        model = modifier_fn(model)

    losses = [
        (ActivationMaximization(model.layers[layer_idx], filter_indices), act_max_weight),
        (LPNorm(model.input, 1), lp_norm_weight),
        (TotalVariation(model.input), tv_weight),
        (EstimateTernaryInput(model.input), alpha)
    ]

    # Add grad_filter to optimizer_params.
    optimizer_params = utils.add_defaults_to_kwargs({
        'grad_modifier': grad_modifier,
        'input_modifiers': [binarizer],
    }, **optimizer_params)

    return visualize_activation_with_losses_dynamic(model.input, losses, wrt_tensor, alpha,
                                                     seed_input, input_range, **optimizer_params)
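
A minimal usage sketch for the function above (hedged: `my_model` and the 'predictions' layer name are placeholders for a trained model; `utils.find_layer_idx` is used as in keras-vis, and keyword args are forwarded to the optimizer via `**optimizer_params`):

# Hedged usage sketch: `my_model` is a placeholder for a trained keras.models.Model
# whose final Dense 'softmax' activation has been swapped for 'linear'.
from vis.utils import utils

layer_idx = utils.find_layer_idx(my_model, 'predictions')   # hypothetical layer name
ternary_img = visualize_activation_ternary_dynamic(
    my_model, layer_idx,
    alpha=1e-6,
    filter_indices=[22],      # maximize output index 22, as in the docstring example
    input_range=(-1, 1),
    max_iter=200)             # forwarded to the optimizer via **optimizer_params
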
def generate_opt_gif():
    """Example to show how to generate the gif of optimization progress.
    This example also shows how to use the optimizer directly with losses.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    output_class = [20]

    losses = [(ActivationMaximization(layer_dict[layer_name],
                                      output_class), 2),
              (LPNorm(model.input), 10), (TotalVariation(model.input), 10)]
    opt = Optimizer(model.input, losses)
    opt.minimize(max_iter=500,
                 verbose=True,
                 callbacks=[GifGenerator('opt_progress')])


def generate_opt_gif():
    """A second variant of the optimization-progress gif example; here the gif path and
    jitter amount are passed directly to `minimize` rather than via callbacks and input modifiers.
    """
    # Build the VGG16 network with ImageNet weights
    model = VGG16(weights='imagenet', include_top=True)
    print('Model loaded.')

    # The name of the layer we want to visualize
    # (see model definition in vggnet.py)
    layer_name = 'predictions'
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    output_class = [20]

    losses = [(ActivationMaximization(layer_dict[layer_name],
                                      output_class), 1), (LPNorm(), 10),
              (TotalVariation(), 1)]
    opt = Optimizer(model.input, losses)

    # Jitter is used as a regularizer to create crisper images, but it makes gif animation ugly.
    opt.minimize(max_iter=500,
                 verbose=True,
                 jitter=0,
                 progress_gif_path='opt_progress')
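
Both `generate_opt_gif` variants assume the usual keras-vis imports; a hedged reconstruction of the setup and a call (module paths as in keras-vis):

# Imports assumed by the generate_opt_gif examples above (keras-vis module layout).
from keras.applications import VGG16
from vis.losses import ActivationMaximization
from vis.regularizers import LPNorm, TotalVariation
from vis.optimizer import Optimizer
from vis.callbacks import GifGenerator

generate_opt_gif()   # writes the optimization-progress gif to 'opt_progress'
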
from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm

filter_indices = [1, 2, 3]

# Tuple consists of (loss_function, weight)
# Add regularizers as needed.
losses = [
    (ActivationMaximization(keras_layer, filter_indices), 1),
    (LPNorm(model.input), 10),
    (TotalVariation(model.input), 10)
]
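
These weighted losses can be passed straight to the `Optimizer`; a minimal sketch (hedged: `model` and `keras_layer` are assumed to be defined as above, and the three return values follow the unpacking used in Example #6 below):

from vis.optimizer import Optimizer

# Minimize the combined weighted loss with respect to the model input.
opt = Optimizer(model.input, losses)
img, grads, wrt_value = opt.minimize(max_iter=200, verbose=True)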

def visualize_activation(model,
                         layer_idx,
                         filter_indices=None,
                         seed_img=None,
                         text=None,
                         act_max_weight=1,
                         lp_norm_weight=10,
                         tv_weight=10,
                         **optimizer_params):
    """Generates stitched input image(s) over all `filter_indices` in the given `layer` that maximize
    the filter output activation.

    Args:
        model: The `keras.models.Model` instance. Model input is expected to be a 4D image input of shape:
            `(samples, channels, rows, cols)` if data_format='channels_first' or `(samples, rows, cols, channels)` if data_format='channels_last'.
        layer_idx: The layer index within `model.layers` whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            If None, all filters are visualized. (Default value = None)
            For `keras.layers.Dense` layers, `filter_idx` is interpreted as the output index.

            An input image is generated for each entry in `filter_indices`. An entry can also be an array.
            For example, `filter_indices = [[1, 2], 3, [4, 5, 6]]` would generate three input images. The first one
            would maximize the output of filters 1 and 2 jointly. A fun use of this might be to generate a dog-fish
            image by maximizing 'dog' and 'fish' output in the final `Dense` layer.

            If you are visualizing the final `keras.layers.Dense` layer, you tend to get
            better results with 'linear' activation as opposed to 'softmax'. This is because 'softmax'
            output can be maximized by minimizing scores for other classes.

        seed_img: Seeds the optimization with a starting image. Initialized with a random value when set to None.
            (Default value = None)
        text: The text to overlay on top of the generated image. (Default Value = None)
        act_max_weight: The weight param for `ActivationMaximization` loss. Not used if 0 or None. (Default value = 1)
        lp_norm_weight: The weight param for `LPNorm` regularization loss. Not used if 0 or None. (Default value = 10)
        tv_weight: The weight param for `TotalVariation` regularization loss. Not used if 0 or None. (Default value = 10)
        optimizer_params: The **kwargs for optimizer [params](vis.optimizer.md#optimizerminimize). Will default to
            reasonable values when required keys are not found.

    Example:
        If you wanted to visualize the input image that would maximize the output index 22, say on
        final `keras.layers.Dense` layer, then, `filter_indices = [22]`, `layer_idx = dense_layer_idx`.

        If `filter_indices = [22, 23]`, then it should generate an input image that shows features of both classes.

    Returns:
        Stitched image output visualizing input images that maximize the filter output(s).
    """
    filter_indices = utils.listify(filter_indices)
    print("Working on filters: {}".format(pprint.pformat(filter_indices)))

    # Default optimizer kwargs.
    optimizer_params_default = {
        'seed_img': seed_img,
        'max_iter': 200,
        'verbose': False,
        'image_modifiers': _DEFAULT_IMG_MODIFIERS
    }
    optimizer_params_default.update(optimizer_params)
    optimizer_params = optimizer_params_default

    losses = [(ActivationMaximization(model.layers[layer_idx],
                                      filter_indices), act_max_weight),
              (LPNorm(model.input), lp_norm_weight),
              (TotalVariation(model.input), tv_weight)]

    opt = Optimizer(model.input, losses, norm_grads=False)
    img = opt.minimize(**optimizer_params)[0]
    if text:
        img = utils.draw_text(img, text)
    return img
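
A hedged usage sketch matching the docstring's example (visualizing output index 22 of the final Dense layer; `model` and the layer index are placeholders, and extra keyword args are forwarded via `**optimizer_params`):

# Hedged usage sketch: assumes `model` ends in the Dense layer to be visualized.
dense_layer_idx = len(model.layers) - 1
img = visualize_activation(model, dense_layer_idx,
                           filter_indices=[22],
                           text='class 22',
                           max_iter=300)   # overrides the default of 200 via **optimizer_params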
Example #6
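The snippet below relies on imports defined elsewhere in the original script; a hedged reconstruction using the usual keras-vis module layout:

import numpy as np
from PIL import Image
from keras.applications import ResNet50
from vis.losses import ActivationMaximization
from vis.regularizers import LPNorm, TotalVariation
from vis.optimizer import Optimizer
from vis.input_modifiers import Jitter
from vis.utils import utils
from vis.visualization import get_num_filters
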
# Pre-trained model (commented out; a fine-tuned model is assumed to be loaded elsewhere)
# model = ResNet50(weights='imagenet')

###---------------------------------------------------------------------------------------------------

layer_names = [
    "activation_10", "activation_22", "activation_34", "activation_46"
]
count = 1
for layer_name in layer_names:
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    layer_idx = utils.find_layer_idx(model, layer_name)

    # Randomly select 50 filters from each layer.
    filters = np.random.permutation(get_num_filters(
        model.layers[layer_idx]))[:50]

    for i in filters:
        losses = [(ActivationMaximization(layer_dict[layer_name], i), 2),
                  (LPNorm(model.input), 6), (TotalVariation(model.input), 1)]

        opt = Optimizer(model.input, losses)
        a, b, c = opt.minimize(max_iter=200,
                               verbose=False,
                               input_modifiers=[Jitter(0.05)])
        print(str(count) + '/200 DONE')
        count += 1
        a = Image.fromarray(a.astype("uint8"))
        a.save('act_max_output/' + layer_name + '_finetuned_' + str(i) +
               '.png')  # change this when pre-trained model is used