# Requires (at module level):
#   from keras import activations
#   from vis.losses import ActivationMaximization
#   from vis.optimizer import Optimizer
#   from vis.utils import utils
#   from vis.backprop_modifiers import get
def init_salient(self, model):
        # Search for the output layer's index by name, skipping dropout layers.
        # Alternatively this can be specified as -1, since the output layer is
        # typically the last layer.
        first_output_name = None
        for i, layer in enumerate(model.layers):
            if "dropout" not in layer.name.lower() and "out" in layer.name.lower():
                first_output_name = layer.name
                layer_idx = i
                break
        if first_output_name is None:
            print("Failed to find the model layer named with 'out'. Skipping salient.")
            return False

        print("####################")
        print("Visualizing activations on layer:", first_output_name)
        print("####################")
        
        # Swap the output activation for linear so gradients are not squashed
        # by softmax.
        model.layers[layer_idx].activation = activations.linear
        # Rebuild the model with the modified activation and apply the
        # guided-backprop modifier.
        sal_model = utils.apply_modifications(model)
        modifier_fn = get('guided')
        sal_model_mod = modifier_fn(sal_model)
        # Maximize all filters (filter_indices=None) of the output layer; the
        # -1 weight makes positive gradients indicate an increase.
        losses = [
            (ActivationMaximization(sal_model_mod.layers[layer_idx], None), -1)
        ]
        self.opt = Optimizer(sal_model_mod.input, losses, norm_grads=False)
        return True
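For reference, a minimal usage sketch of the optimizer built above; `viewer` (an instance of the class defining `init_salient`) and `image` (a single preprocessed input) are hypothetical names, and the `minimize` call mirrors the keras-vis `Optimizer` usage in Example 5 below:

# Minimal usage sketch; `viewer` and `image` are hypothetical names.
if viewer.init_salient(model):
    # Optimizer.minimize returns (optimized input, gradients, wrt_value);
    # index [1] selects the saliency gradients for the seed input.
    grads = viewer.opt.minimize(seed_input=image,
                                max_iter=1,
                                grad_modifier='absolute',
                                verbose=False)[1]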
Example 2
def visualize_cam(model,
                  layer_idx,
                  filter_indices,
                  seed_input,
                  penultimate_layer_idx=None,
                  backprop_modifier=None,
                  grad_modifier=None):
    """Generates a gradient based class activation map (grad-CAM) that maximizes the outputs of
    `filter_indices` in `layer_idx`.

    Args:
        model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
            if `image_data_format=channels_first` or `(samples, image_dims..., channels)` if
            `image_data_format=channels_last`.
        layer_idx: The layer index within `model.layers` whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            If None, all filters are visualized.
            For a `keras.layers.Dense` layer, `filter_indices` is interpreted as the output index.
            If you are visualizing the final `keras.layers.Dense` layer, consider swapping the 'softmax' activation
            for 'linear' using [utils.apply_modifications](vis.utils.utils#apply_modifications) for better results.
        seed_input: The input image for which the activation map needs to be visualized.
        penultimate_layer_idx: The layer preceding `layer_idx` whose feature maps should be used to compute gradients
            wrt the filter output. If not provided, it is set to the nearest preceding `Conv` or `Pooling` layer.
        backprop_modifier: backprop modifier to use. See [backprop_modifiers](vis.backprop_modifiers.md). If you don't
            specify anything, no backprop modification is applied. (Default value = None)
        grad_modifier: gradient modifier to use. See [grad_modifiers](vis.grad_modifiers.md). If you don't
            specify anything, gradients are unchanged. (Default value = None)

    Example:
        To visualize attention over the 'bird' category, say output index 22 on the
        final `keras.layers.Dense` layer, use `filter_indices = [22]` and `layer = dense_layer`.

        `filter_indices` can also be set to more than one value. For example, `filter_indices = [22, 23]` should
        (hopefully) show an attention map that corresponds to both output categories 22 and 23.

    Returns:
        The heatmap image indicating the input regions whose change would most contribute towards
        maximizing the output of `filter_indices`.
    """
    if backprop_modifier is not None:
        modifier_fn = get(backprop_modifier)
        model = modifier_fn(model)

    penultimate_layer = _find_penultimate_layer(model, layer_idx,
                                                penultimate_layer_idx)

    # `ActivationMaximization` outputs negative gradient values for an increase in activations. Multiply by -1
    # so that positive gradients indicate an increase instead.
    losses = [(ActivationMaximization(model.layers[layer_idx],
                                      filter_indices), -1)]
    return visualize_cam_with_losses(model.input, losses, seed_input,
                                     penultimate_layer, grad_modifier)
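A hedged usage sketch based on the docstring above; `model`, `img` (a single preprocessed image), and the output layer name 'predictions' are assumptions:

from vis.utils import utils

# Hypothetical layer name; find_layer_idx is the keras-vis helper for this.
layer_idx = utils.find_layer_idx(model, 'predictions')
# grad-CAM for output index 22 (the 'bird' example from the docstring),
# with guided backprop.
heatmap = visualize_cam(model, layer_idx,
                        filter_indices=[22],
                        seed_input=img,
                        backprop_modifier='guided')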
Example 3
def compute_tcav(model,
                 layer_idx,
                 filter_indices,
                 seed_input,
                 wrt_tensor=None,
                 backprop_modifier=None,
                 grad_modifier='absolute'):
    """Computes a Conceptual Sensitivity score `.

    Args:
        model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
            if `image_data_format=channels_first` or `(samples, image_dims..., channels)` if
            `image_data_format=channels_last`.
        layer_idx: The layer index within `model.layers` whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            If None, all filters are visualized.
            For a `keras.layers.Dense` layer, `filter_indices` is interpreted as the output index.
            If you are visualizing the final `keras.layers.Dense` layer, consider swapping the 'softmax' activation
            for 'linear' using [utils.apply_modifications](vis.utils.utils#apply_modifications) for better results.
        seed_input: The model input for which the activation map needs to be visualized.
        wrt_tensor: Short for "with respect to". The gradients of the losses are computed with respect to this
            tensor. When None, this is assumed to be the same as `input_tensor`. (Default value: None)
            NB: for sensitivity computation, this becomes the output of the layer at which sensitivity is computed.
        backprop_modifier: backprop modifier to use. See [backprop_modifiers](vis.backprop_modifiers.md). If you don't
            specify anything, no backprop modification is applied. (Default value = None)
        grad_modifier: gradient modifier to use. See [grad_modifiers](vis.grad_modifiers.md). By default, the `absolute`
            value of gradients is used. To visualize positive or negative gradients, use `relu` and `negate`
            respectively. (Default value = 'absolute')

    Example:
        To visualize attention over the 'bird' category, say output index 22 on the
        final `keras.layers.Dense` layer, use `filter_indices = [22]` and `layer = dense_layer`.

        `filter_indices` can also be set to more than one value. For example, `filter_indices = [22, 23]` should
        (hopefully) show an attention map that corresponds to both output categories 22 and 23.

    Returns:
        The value returned by `compute_tcav_with_losses` (not yet documented).
    """
    if backprop_modifier is not None:
        modifier_fn = get(backprop_modifier)
        model = modifier_fn(model)

    # `ActivationMaximization` loss reduces as outputs get large, hence negative gradients indicate the direction
    # for increasing activations. Multiply by -1 so that positive gradients indicate an increase instead.
    losses = [(ActivationMaximization(model.layers[layer_idx],
                                      filter_indices), -1)]
    return compute_tcav_with_losses(model.input, losses, seed_input,
                                    wrt_tensor, grad_modifier)
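A hedged usage sketch; `model`, `img`, and the layer indices are assumptions. Per the NB above, the concept layer's output tensor is passed as `wrt_tensor`:

# All names and indices below are assumptions for illustration.
concept_tensor = model.layers[4].output  # layer at which sensitivity is computed
sensitivity = compute_tcav(model,
                           layer_idx=-1,          # final (Dense) layer
                           filter_indices=[22],
                           seed_input=img,
                           wrt_tensor=concept_tensor,
                           grad_modifier='absolute')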
Example 4
# Requires (at module level): `os`, `vis.backprop_modifiers`, `ActivationMaximization`
# from `vis.losses`, and a `visualize_saliency_with_losses` variant that accepts
# `original_img` and `save_path` arguments.
def visualize_saliency_3Dcnn(model, layer_idx, filter_indices, seed_input, original_img,
                             backprop_modifier=None, grad_modifier='absolute', save_pathname='images'):

    if backprop_modifier is not None:
        modifier_fn = backprop_modifiers.get(backprop_modifier)
        model = modifier_fn(model)

    # `ActivationMaximization` loss reduces as outputs get large, hence negative gradients indicate the direction
    # for increasing activations. Multiply by -1 so that positive gradients indicate an increase instead.
    losses = [
        (ActivationMaximization(model.layers[layer_idx], filter_indices), -1)
    ]
    # Create the per-filter output directory, e.g. images/<filter_indices + 1>/.
    save_dir = os.path.join(save_pathname, str(filter_indices + 1))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    visualize_saliency_with_losses(model.input, losses, seed_input, original_img,
                                   grad_modifier, save_path=save_dir + '/')
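A hedged usage sketch; `model`, `clip`, and `raw_clip` are assumptions:

# All argument values below are assumptions for illustration.
visualize_saliency_3Dcnn(model,
                         layer_idx=-1,             # final layer
                         filter_indices=3,         # maps are saved under images/4/
                         seed_input=clip,          # preprocessed 3D input volume
                         original_img=raw_clip,    # unprocessed frames (e.g. for overlays)
                         backprop_modifier='guided',
                         save_pathname='images')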
Example 5
def salience_visualization(model,
                           save_directory,
                           conn_name,
                           output_x,
                           output_y,
                           output_f,
                           verbose=True,
                           clas=None,
                           perc_output=1):
    import os
    import numpy as np
    from vis.losses import ActivationMaximization
    from vis.optimizer import Optimizer
    from vis.utils import utils
    from vis.backprop_modifiers import get
    from keras import activations
    # Note: `update_progress` is assumed to be a progress-display helper
    # defined elsewhere in the module.

    if clas is not None:
        # Force every label to the requested class.
        output_y = np.zeros(output_y.shape) + clas
    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    #layer_idx = utils.find_layer_idx(model, 'preds')
    layer_idx = -1
    # Swap softmax with linear
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    # Backprop modifier: 'guided' here; can be None (i.e. vanilla) or 'relu'.
    modifier = 'guided'
    save_grads_path = os.path.join(save_directory, 'salience', conn_name)
    if not os.path.isdir(os.path.join(save_directory, 'salience')):
        os.mkdir(os.path.join(save_directory, 'salience'))
    if not os.path.isdir(save_grads_path):
        os.mkdir(save_grads_path)
    print("Outputting saliency maps")
    # Optionally, stale files in save_grads_path could be purged here before
    # writing new maps; that cleanup is left disabled.
    modifier_fn = get(modifier)
    model = modifier_fn(model)
    for idx in range(output_x.shape[0]):
        if verbose:
            update_progress(float(idx) / output_x.shape[0])
        if float(idx) / output_x.shape[0] > perc_output:
            break
        # (Skipping inputs whose saliency file already exists is disabled here.)
        # Maximize the activation of this input's (possibly overridden) class.
        losses = [(ActivationMaximization(model.layers[layer_idx],
                                          int(output_y[idx][0])), -1)]
        opt = Optimizer(model.input, losses, wrt_tensor=None, norm_grads=False)
        # minimize returns (optimized input, gradients, wrt_value); keep the gradients.
        grads = opt.minimize(seed_input=output_x[idx],
                             max_iter=1,
                             grad_modifier='absolute',
                             verbose=False)[1]
        # Save each channel of the saliency map to its own "wc<i>" subfolder.
        for i in range(grads.shape[3]):
            wc_subfolder = os.path.join(save_grads_path, "wc%d" % i)
            if not os.path.isdir(wc_subfolder):
                os.mkdir(wc_subfolder)
            np.savetxt(os.path.join(wc_subfolder, output_f[idx]),
                       np.squeeze(grads[:, :, :, i]))
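Finally, a hedged usage sketch for salience_visualization; every argument value below is an assumption:

# All argument values below are assumptions for illustration.
salience_visualization(model,
                       save_directory='results',
                       conn_name='run1',
                       output_x=x_test,        # inputs, e.g. shape (n, H, W, C)
                       output_y=y_test,        # integer class labels, shape (n, 1)
                       output_f=filenames,     # one output filename per input
                       verbose=True,
                       perc_output=0.1)        # process only the first 10% of inputs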