Example No. 1
def gradcam(
    features: Module, classifier: Module, input_image: Tensor, n_top_classes: int = 3
) -> Tensor:
    """Performs GradCAM on an input image.

    For further explanation of GradCAM, see https://arxiv.org/abs/1610.02391.

    Args:
        features: the spatial feature part of the model, before classifier
        classifier: the classifier part of the model, after spatial features
        input_image: image tensor of dimensions (c, h, w) or (1, c, h, w)
        n_top_classes: the number of classes to calculate GradCAM for

    Returns:
        a GradCAM heatmap of dimensions (h, w)

    """
    # Get selector for top k classes
    input_image = _prepare_input(input_image)
    class_selector = _top_k_selector(
        Sequential(features.eval(), classifier.eval()), input_image, n_top_classes
    )
    # Apply spatial GradAM on classes
    gradam = GradAM(classifier, class_selector, features, SpatialSplit())
    result = gradam.visualize(input_image)
    return _upscale(result, tuple(input_image.size()[2:]))
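
A minimal usage sketch (not part of the source), assuming a torchvision AlexNet split into a spatial feature part and a dense classifier part; the split point and the added Flatten are assumptions about how the model needs to be partitioned:

import torch
from torch.nn import Flatten, Sequential
from torchvision import models

alexnet = models.alexnet(pretrained=True)
# Assumed partition: spatial features (conv layers + pooling) vs. dense classifier.
features = Sequential(alexnet.features, alexnet.avgpool)
classifier = Sequential(Flatten(), alexnet.classifier)

image = torch.rand(3, 224, 224)  # stand-in for a normalized input image
heatmap = gradcam(features, classifier, image, n_top_classes=3)
print(heatmap.size())  # (224, 224): upscaled to the input's spatial size
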
Example No. 2
def class_visualization(net: Module, class_index: int) -> Tensor:
    """Visualizes a class for a classification network.

    Args:
        net: the network to visualize for
        class_index: the index of the class to visualize

    """
    if class_index < 0:
        raise ValueError(f"Invalid class: {class_index}")

    # Coarse pass: optimize a small, randomly initialized image.
    img = PixelActivation(
        net.eval(),
        SplitSelector(NeuronSplit(), [class_index]),
        opt_n=500,
        iter_n=20,
        init_size=50,
        transform=RandomTransform(scale_fac=0)
        + BilateralTransform()
        + ResizeTransform(1.1),
        regularization=[TVRegularization(5e1), WeightDecay(1e-9)],
    ).visualize()

    # Refinement pass: continue optimizing from the coarse result.
    return PixelActivation(
        net,
        SplitSelector(NeuronSplit(), [class_index]),
        opt_n=100,
        iter_n=50,
        transform=RandomTransform() + BilateralTransform(),
        regularization=[TVRegularization(), WeightDecay()],
    ).visualize(img)
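
The function runs two optimization passes: a coarse pass on a small, randomly initialized image, whose result then seeds a second refinement pass. A hypothetical call, with the network and class index chosen only for illustration:

from torchvision import models

net = models.alexnet(pretrained=True)
# Optimize an input image that strongly activates output neuron 283 (an ImageNet class).
vis = class_visualization(net, class_index=283)
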
Example No. 3
    def validate(self, model: Module, data_loader: DataLoader, use_cuda: bool,
                 criterion: Module) -> float:
        """Performs one epoch of validating of the model and returns the
        obtained validation loss.

        :param model: model to be validated
        :param data_loader: validation data loader
        :param use_cuda: a flag indicating whether CUDA should be used
        :param criterion: loss function
        :return: validation loss
        """
        valid_loss = 0.0
        model.eval()
        for batch_idx, (data, target) in enumerate(tqdm(data_loader)):
            data = self.move_to_gpu(data, use_cuda)
            output = model(*data)
            loss = criterion(*output)
            # Incremental mean: after batch k, valid_loss is the average loss
            # over the first k + 1 batches.
            valid_loss += (1 / (batch_idx + 1)) * (loss.item() - valid_loss)
        return valid_loss
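
The accumulation line is an incremental mean: after batch k (0-indexed), valid_loss equals the average loss over the first k + 1 batches. A standalone check of that recurrence on plain numbers:

losses = [0.9, 0.7, 0.5, 0.3]
running = 0.0
for i, loss in enumerate(losses):
    running += (1 / (i + 1)) * (loss - running)  # same update rule as in validate()
print(running, sum(losses) / len(losses))  # both print 0.6
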
Example No. 4
def occlusion(net: Module, input_image: Tensor, n_top_classes: int = 3) -> Tensor:
    """Creates a attribution heatmap by occluding parts of the input image.

    Args:
        net: the network to visualize attribution for
        input_image: image tensor of dimensions (c, h, w) or (1, c, h, w)
        n_top_classes: the number of classes to account for

    Returns:
        an occlusion heatmap of dimensions (h, w)

    """
    input_image = _prepare_input(input_image)
    class_selector = _top_k_selector(net.eval(), input_image, n_top_classes)
    # Apply occlusion
    occlusion_ = Occlusion(net, class_selector, SpatialSplit(), [1, 10, 10], [1, 5, 5])
    result = occlusion_.visualize(input_image)
    return _upscale(result, tuple(input_image.size()[2:]))
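
A minimal usage sketch (not part of the source), assuming a pretrained torchvision classifier and a random tensor as a stand-in for a preprocessed image:

import torch
from torchvision import models

net = models.resnet18(pretrained=True)
image = torch.rand(3, 224, 224)  # stand-in for a normalized input image
heatmap = occlusion(net, image, n_top_classes=3)
print(heatmap.size())  # (224, 224), matching the input's spatial dimensions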