Code Example #1
import torch
import torch.nn as nn

from torchray.attribution.common import Probe, get_module

# torch_to_numpy is assumed to be a project-local helper (referenced in the
# docstring below).


def get_acts(model, x, layer_name, convert=False):
    """
    Return activations at layer :param:`layer_name` of :param:`model` for
    input :param:`x`.

    Args:
        model (:class:`nn.Module`): PyTorch model.
        x (:class:`torch.Tensor`): input tensor for model.
        layer_name (str): name of PyTorch module from which to collect
            activations.
        convert (bool, optional): If True, convert intermediate activations
            to numpy array using :func:`torch_to_numpy`. Default: `False`.

    Returns:
        (:class:`torch.Tensor` or :class:`np.ndarray`): intermediate
            activations.
    """
    assert isinstance(x, torch.Tensor)
    assert isinstance(model, nn.Module)

    # Get layer.
    layer = get_module(model, layer_name)
    assert layer is not None

    # Attach probe to layer to collect activations.
    probe = Probe(layer, "output")
    _ = model(x)

    # Grab the recorded activations, then detach the probe's hook so later
    # forward passes are unaffected.
    data = probe.data[0]
    probe.remove()

    # Return tensor.
    if not convert:
        return data

    # Return numpy array.
    return torch_to_numpy(data)
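
As a quick sanity check, get_acts can be pointed at a torchvision ResNet (a
hedged sketch: the model, layer name, and shapes are illustrative, not from
the original project):

import torch
from torchvision.models import resnet18

net = resnet18().eval()
batch = torch.randn(2, 3, 224, 224)
acts = get_acts(net, batch, "layer4")
print(acts.shape)  # expected: torch.Size([2, 512, 7, 7])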
Code Example #2
import sys
from pathlib import Path

import cv2
import numpy as np
import torch

from torchray.attribution.common import Probe, get_module
from torchray.attribution.grad_cam import gradient_to_grad_cam_saliency

# GuidedBackpropReLUModel, deprocess, and deprocess_image_gb are
# project/third-party helpers (e.g. from a pytorch-grad-cam-style codebase).


def guided_backprop_grad_cam(model, data, main_folder, n_batches=None):
    gb_model = GuidedBackpropReLUModel(model=model)
    classes = data["test"].dataset.classes
    for i, (inputs, labels) in enumerate(data["test"]):
        sys.stdout.write("\r" + f"batch nr: {i + 1}")
        x = inputs  # kept on CPU; move to the model's device if needed
        x.requires_grad_()
        saliency_layer = get_module(model, model.layer4)
        probe = Probe(saliency_layer, target="output")
        y = model(x)
        score_max_index = y.argmax(dim=1)
        # Index each sample's own top score; y[:, score_max_index] would
        # select an [N, N] block and mix gradients across the batch.
        z = y[torch.arange(y.shape[0]), score_max_index]
        z.backward(torch.ones_like(z))
        saliency = gradient_to_grad_cam_saliency(probe.data[0])
        probe.remove()  # detach the hook before the next batch

        for index in range(len(saliency)):
            heatmap = np.float32(saliency[index, 0].cpu().detach())
            img = np.array(deprocess(x[index].cpu().detach()))

            heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
            heatmap = np.uint8(255 * heatmap)
            cam_mask = cv2.merge([heatmap, heatmap, heatmap])

            gb = gb_model(x[index].unsqueeze(0).detach().cpu(),
                          target_category=labels[index].cpu())
            gb = gb.transpose((1, 2, 0))
            cam_gb = deprocess_image_gb(cam_mask * gb)

            label = classes[labels[index]]
            true = labels[index]
            pred = score_max_index[index]
            pred_res = "OK"
            if pred != true:
                pred_res = "wrong"

            # Recover the source filename (assumes the test loader is not
            # shuffled, so sample order matches iteration order).
            input_filename = Path(
                data["test"].dataset.samples[i * len(saliency) +
                                             index][0]).stem
            cv2.imwrite(
                str(main_folder / f"{label}/{input_filename}_{pred_res}.png"),
                cam_gb)


        if n_batches and i + 1 == n_batches:
            break
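
A possible way to wire this up (a sketch: the loader dict, output directory,
and per-class sub-folders are my assumptions, matching what the function
indexes and where it writes):

from pathlib import Path

out_dir = Path("gb_gradcam_results")
for cls in data["test"].dataset.classes:
    (out_dir / cls).mkdir(parents=True, exist_ok=True)  # one folder per label
guided_backprop_grad_cam(model, data, out_dir, n_batches=5)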
Code Example #3
def grad_cam_batch(model, inputs, gb_cam=False):
    x = inputs.to(0)  # move the batch to GPU 0
    x.requires_grad_()
    saliency_layer = get_module(model, model.layer4)
    probe = Probe(saliency_layer, target="output")
    y = model(x)
    score_max_index = y.argmax(dim=1)
    # Index each sample's own top score rather than an [N, N] slice.
    z = y[torch.arange(y.shape[0]), score_max_index]
    z.backward(torch.ones_like(z))
    grad_cam = gradient_to_grad_cam_saliency(probe.data[0])
    probe.remove()  # detach the hook so repeated calls don't stack hooks
    if not gb_cam:
        return grad_cam, score_max_index
    else:
        return grad_cam, score_max_index, x
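
Minimal usage sketch (assumes a CUDA device at index 0 and a ResNet-style
model exposing a layer4 block; the random batch is purely illustrative):

import torch
from torchvision.models import resnet50

model = resnet50().to(0).eval()
cams, preds = grad_cam_batch(model, torch.randn(4, 3, 224, 224))
print(cams.shape, preds)  # one low-resolution CAM per input, plus argmax labels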
Code Example #4
def update_probe(probed_model):
    # Detach any stale probe first so hooks don't accumulate on the module.
    if getattr(probed_model, "probe", None) is not None:
        probed_model.probe.remove()
    module = get_module(probed_model.model, probed_model.module)
    probed_model.probe = Probe(module, target="output")
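
The function only assumes its argument carries a model, a module name, and a
probe slot; a hypothetical container with exactly those attributes (this
dataclass is mine, not the project's):

from dataclasses import dataclass
from typing import Optional

import torch.nn as nn

from torchray.attribution.common import Probe


@dataclass
class ProbedModel:
    model: nn.Module
    module: str                    # dotted module name, e.g. "layer4"
    probe: Optional[Probe] = None  # filled in by update_probe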
Code Example #5
File: utils.py  Project: smstrzd/IntegratedGradCAM
def saliency(model,
             input,
             target,
             baseline=None,
             saliency_layer='',
             resize=False,
             resize_mode='bilinear',
             smooth=0,
             context_builder=NullContext,
             gradient_to_saliency=gradient_to_grad_cam_saliency,
             get_backward_gradient=get_backward_gradient,
             debug=False):
    """Apply a backprop-based attribution method to an image.

    The saliency method is specified by a suitable context factory
    :attr:`context_builder`. This context is used to modify the backpropagation
    algorithm to match a given visualization method. This:

    Args:
        model (:class:`torch.nn.Module`): a model.
        input (:class:`torch.Tensor`): input tensor.
        target (int or :class:`torch.Tensor`): target label(s).
        baseline (:class:`torch.Tensor`, optional): baseline tensor passed
            through to :attr:`gradient_to_saliency`. Default: ``None``.
        saliency_layer (str or :class:`torch.nn.Module`, optional): name of the
            saliency layer (str) or the layer itself (:class:`torch.nn.Module`)
            in the model at which to visualize. Default: ``''`` (visualize
            at input).
        resize (bool or tuple, optional): if True, upsample saliency map to the
            same size as :attr:`input`. It is also possible to specify a pair
            (width, height) for a different size. Default: ``False``.
        resize_mode (str, optional): upsampling method to use. Default:
            ``'bilinear'``.
        smooth (float, optional): amount of Gaussian smoothing to apply to the
            saliency map. Default: ``0``.
        context_builder (type, optional): type of context to use. Default:
            :class:`NullContext`.
        gradient_to_saliency (function, optional): function that converts the
            pseudo-gradient signal to a saliency map. Default:
            :func:`gradient_to_grad_cam_saliency`.
        get_backward_gradient (function, optional): function that generates
            gradient tensor to backpropagate. Default:
            :func:`get_backward_gradient`.
        debug (bool, optional): if True, also return an
            :class:`collections.OrderedDict` of :class:`Probe` objects for
            all modules in the model. Default: ``False``.

    Returns:
        :class:`torch.Tensor` or tuple: If :attr:`debug` is False, returns a
        :class:`torch.Tensor` saliency map at :attr:`saliency_layer`.
        Otherwise, returns a tuple of a :class:`torch.Tensor` saliency map
        at :attr:`saliency_layer` and an :class:`collections.OrderedDict`
        of :class:`Probe` objects for all modules in the model.
    """

    # Clear any existing gradient.
    if input.grad is not None:
        input.grad.data.zero_()

    # Disable gradients for model parameters.
    orig_requires_grad = {}
    for name, param in model.named_parameters():
        orig_requires_grad[name] = param.requires_grad
        param.requires_grad_(False)

    # Set model to eval mode.
    if model.training:
        orig_is_training = True
        model.eval()
    else:
        orig_is_training = False

    # Attach debug probes to every module.
    debug_probes = attach_debug_probes(model, debug=debug)

    # Attach a probe to the saliency layer.
    probe_target = 'input' if saliency_layer == '' else 'output'
    saliency_layer = get_module(model, saliency_layer)
    assert saliency_layer is not None, 'We could not find the saliency layer'
    probe = Probe(saliency_layer, target=probe_target)

    # Do a forward and backward pass.
    with context_builder():
        output = model(input)
        backward_gradient = get_backward_gradient(output, target)
        output.backward(backward_gradient)

    # Get saliency map from gradient.
    saliency_map = gradient_to_saliency(probe.data[0], baseline)

    # Resize saliency map.
    saliency_map = resize_saliency(input,
                                   saliency_map,
                                   resize,
                                   mode=resize_mode)

    # Smooth saliency map.
    if smooth > 0:
        saliency_map = imsmooth(
            saliency_map,
            sigma=smooth * max(saliency_map.shape[2:]),
            padding_mode='replicate'
        )

    # Remove probe.
    probe.remove()

    # Restore gradient saving for model parameters.
    for name, param in model.named_parameters():
        param.requires_grad_(orig_requires_grad[name])

    # Restore model's original mode.
    if orig_is_training:
        model.train()

    if debug:
        return saliency_map, debug_probes
    else:
        return saliency_map
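
With the defaults above this reduces to a Grad-CAM-style pipeline, so it can
be smoke-tested on TorchRay's bundled example (a sketch: 'features.29' is a
late conv layer of the bundled VGG16, and I assume the project's
gradient_to_grad_cam_saliency accepts the baseline=None it is passed):

from torchray.benchmark import get_example_data, plot_example

model, x, category_id, _ = get_example_data()
sal = saliency(model, x, category_id,
               saliency_layer='features.29',
               resize=True)
plot_example(x, sal, 'saliency at features.29', category_id)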
Code Example #6
from torchray.attribution.common import Probe, get_module
from torchray.attribution.linear_approx import gradient_to_linear_approx_saliency
from torchray.benchmark import get_example_data, plot_example

# Obtain example data.
model, x, category_id, _ = get_example_data()

# Linear approximation.
saliency_layer = get_module(model, 'features.29')

probe = Probe(saliency_layer, target='output')

y = model(x)
z = y[0, category_id]
z.backward()

saliency = gradient_to_linear_approx_saliency(probe.data[0])

# Plots.
plot_example(x, saliency, 'linear approx', category_id)
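
Wrapped into a reusable helper, the snippet reads as below (my refactor; the
probe cleanup is an addition, since the one-shot script above can simply
exit):

def linear_approx_saliency(model, x, class_id, layer_name='features.29'):
    layer = get_module(model, layer_name)
    probe = Probe(layer, target='output')
    y = model(x)
    y[0, class_id].backward()
    sal = gradient_to_linear_approx_saliency(probe.data[0])
    probe.remove()  # detach hooks so the model is left clean
    return sal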
Code Example #7
import os

import torch
import torch.nn as nn

from torchray.attribution.common import Probe, get_module

# extract_last, models.mlp_, and debias.partial_orthogonalization are
# project-local helpers assumed importable in the original file.


def create_net2vec(model,
                   module_name,
                   n_categories,
                   device,
                   pretrained=False,
                   weights_path=None,
                   initialize=False,
                   example_batch=None,
                   nonlinear=False,
                   partial_projection=False,
                   t=0):
    layer = get_module(model, module_name)
    activation_probe = Probe(layer, 'output')
    extracted = extract_last(layer)
    if extracted is None:
        # i.e. current layer doesn't have children and isn't a linear transform
        _ = model(example_batch.to(device))
        n_neurons = activation_probe.data[0].shape[1]
    else:
        n_neurons = extracted.weight.shape[0]
    if not nonlinear:
        net = nn.Linear(
            n_neurons, n_categories
        )
    else:
        net = models.mlp_(
            in_dim=n_neurons,
            out=n_categories
        )
    # Either head exposes the same state_dict interface, so loading works for both.
    if pretrained:
        assert weights_path is not None
        if os.path.exists(weights_path):
            net.load_state_dict(
                torch.load(weights_path,map_location=lambda storage, loc: storage)
            )
        elif initialize:
            torch.save(net.state_dict(), weights_path)
        else:
            raise Exception("If you want to create a new model, please denote initialize for: " + str(weights_path))
    
    
    net.to(device)
    if partial_projection:
        # Assumes the linear head (net.weight): orthogonalize the category
        # embeddings against the direction defined by the last two rows.
        embeddings = net.weight.data[:n_categories - 2]
        vg = net.weight.data[-2] - net.weight.data[-1]
        embeddings = debias.partial_orthogonalization(
            embeddings,
            vg,
            t=1e-2  # hard-coded; the function's t parameter is not used here
        )
        with torch.no_grad():
            net.weight.data = torch.cat(
                [embeddings,
                    net.weight.data[-2].unsqueeze(0),
                    net.weight.data[-1].unsqueeze(0)
                ]
            )
    
    def net2vec(X, forward=False, switch_modes=True):
        # forward=True means the caller has already run the model, so the
        # probe data is fresh and no extra forward pass is needed here.
        if not forward:
            first_mode = model.training
            if switch_modes:
                model.eval()
                _ = model(X)
                if first_mode:
                    model.train()
            else:
                _ = model(X)
        features = activation_probe.data[0]
        if len(features.shape) == 4:
            # Conv output: global-average-pool the spatial dimensions
            # (a plain mean keeps the batch dimension even for batch size 1).
            features = features.mean((2, 3))
        elif len(features.shape) == 2:
            # Linear output: use as-is.
            pass
        else:
            raise Exception("Neither Linear nor Conv module given")
        return net(features)

    return net, net2vec, activation_probe
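
A hypothetical call exercising the conv path (the backbone, layer name,
category count, and batch shapes are all illustrative, and the project
helpers noted above must be importable):

import torch
from torchvision.models import resnet18

backbone = resnet18().eval()
net, net2vec, probe = create_net2vec(
    backbone, "layer4", n_categories=10, device="cpu",
    example_batch=torch.randn(2, 3, 224, 224))
logits = net2vec(torch.randn(2, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([2, 10])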
Code Example #8

from torchray.attribution.common import Probe, get_module
from torchray.attribution.excitation_backprop import ExcitationBackpropContext
from torchray.attribution.excitation_backprop import gradient_to_contrastive_excitation_backprop_saliency
from torchray.benchmark import get_example_data, plot_example

# Obtain example data.
model, x, category_id, _ = get_example_data()

# Contrastive excitation backprop.
input_layer = get_module(model, 'features.9')
contrast_layer = get_module(model, 'features.30')
classifier_layer = get_module(model, 'classifier.6')

input_probe = Probe(input_layer, target='output')
contrast_probe = Probe(contrast_layer, target='output')

with ExcitationBackpropContext():
    y = model(x)
    z = y[0, category_id]
    classifier_layer.weight.data.neg_()
    z.backward()

    classifier_layer.weight.data.neg_()

    contrast_probe.contrast = [contrast_probe.data[0].grad]

    y = model(x)
    z = y[0, category_id]
    z.backward()

saliency = gradient_to_contrastive_excitation_backprop_saliency(
    input_probe.data[0])
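
The snippet is cut off at this call; the upstream TorchRay example closes by
plotting the input-layer saliency (reconstructed here from the library's
documented example):

# Plots.
plot_example(x, saliency, 'contrastive excitation backprop', category_id)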