Example #1
def visualizeAllClasses(self,
                        image_normalized,
                        input_non_normalized,
                        listOfClasses,
                        guided=False,
                        use_gpu=False,
                        cmap='viridis',
                        alpha=.5):
    w = input_non_normalized.shape[3]
    h = input_non_normalized.shape[2]

    # One column per class: tile the per-class gradients along the width
    result = torch.zeros(1, 3, h, w * len(listOfClasses))
    for idx, target_class in enumerate(listOfClasses):
        result[:, :, :, idx * w:(idx + 1) * w] = self.calculate_gradients(
            image_normalized,
            target_class,
            guided=guided,
            take_max=True,
            use_gpu=use_gpu)

    stdized = standardize_and_clip(result)
    stdized = torch.cat((stdized, input_non_normalized), 3)

    output = (format_for_plotting(stdized), cmap, alpha)
    return output
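
A hedged usage sketch for the (image, cmap, alpha) tuple this method returns; `backprop`, the class indices, and the plotting code below are assumptions, not part of the snippet above.

# Hypothetical driver: render the tuple returned by visualizeAllClasses.
# Assumes `backprop` is an instance of the class that owns the method above.
import matplotlib.pyplot as plt

image, cmap, alpha = backprop.visualizeAllClasses(image_normalized,
                                                  input_non_normalized,
                                                  [24, 130, 285])
plt.axis('off')
plt.imshow(image, cmap=cmap, alpha=alpha)
plt.show()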
Example #2
def test_standardize_and_clip_detach_input_from_graph():

    input_ = torch.randint(low=-1000, high=1000, size=(224, 224)).float()
    input_.requires_grad = True
    normalized = standardize_and_clip(input_)

    assert not normalized.requires_grad
Example #3
    def _visualize_filters(self, layer, filter_idxs, num_iter, num_subplots,
                           title):
        # Prepare the main plot

        num_cols = 4
        num_rows = int(np.ceil(num_subplots / num_cols))

        fig = plt.figure(figsize=(16, num_rows * 5))
        plt.title(title)
        plt.axis('off')

        self.output = []

        # Plot subplots

        for i, filter_idx in enumerate(filter_idxs):
            output = self.optimize(layer, filter_idx, num_iter=num_iter)

            self.output.append(output)

            ax = fig.add_subplot(num_rows, num_cols, i+1)
            ax.set_xticks([])
            ax.set_yticks([])
            ax.set_title(f'filter {filter_idx}')

            ax.imshow(format_for_plotting(
                standardize_and_clip(output[-1],
                                     saturation=0.15,
                                     brightness=0.7)))

        plt.subplots_adjust(wspace=0, hspace=0)
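
`_visualize_filters` is private, so a caller would normally reach it through a public wrapper; the sketch below invokes it directly for illustration. `GradientAscent`, the VGG16 model, and the layer index are assumptions.

# Hypothetical call site; the model and layer choice are made up.
import torchvision.models as models

model = models.vgg16(pretrained=True)
g_ascent = GradientAscent(model.features)

# Plot four filters of one conv layer on a single row of subplots.
g_ascent._visualize_filters(model.features[24],
                            filter_idxs=[0, 3, 7, 11],
                            num_iter=30,
                            num_subplots=4,
                            title='conv layer 24')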
Example #4
    def deepdream(self,
                  img_path,
                  layer,
                  filter_idx,
                  lr=.1,
                  num_iter=20,
                  figsize=(4, 4),
                  title='DeepDream',
                  return_output=False):
        """Creates DeepDream.

        It applies the optimization to the image provided. The image is loaded
        and converted into a torch.Tensor compatible with the input of the
        network.

        Read the original blog post by Google for more information on
        `DeepDream <https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html>`_.

        Args:
            img_path (str): A path to the image you want to apply DeepDream on
            layer (torch.nn.modules.conv.Conv2d): The target Conv2d layer from
                which the filter is chosen, based on `filter_idx`.
            filter_idx (int): The index of the target filter.
            lr (float, optional, default=.1): The step size of optimization.
            num_iter (int, optional, default=20): The number of iterations for
                the gradient ascent operation.
            figsize (tuple, optional, default=(4, 4)): The size of the plot.
            title (str, optional, default='DeepDream'): The title of the plot.
            return_output (bool, optional, default=False): Returns the
                output(s) of optimization if set to True.

        Returns:
            output (list of torch.Tensor): With dimensions
                :math:`(num_iter, C, H, W)`. The size of the image is
                determined by the `img_size` attribute, which defaults to 224.

        """ # noqa

        input_ = apply_transforms(load_image(img_path), self.img_size)

        self._lr = lr
        output = self.optimize(layer, filter_idx, input_, num_iter=num_iter)

        plt.figure(figsize=figsize)
        plt.axis('off')
        plt.title(title)

        plt.imshow(
            format_for_plotting(
                standardize_and_clip(output[-1],
                                     saturation=0.15,
                                     brightness=0.7)))

        if return_output:
            return output
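
A hedged usage sketch for deepdream; the model, layer index, filter index, and image path are assumptions for illustration.

# Hypothetical driver for deepdream(); the file name and layer are made up.
import torchvision.models as models

model = models.vgg16(pretrained=True)
g_ascent = GradientAscent(model.features)

# Dream on one filter of a late conv layer and keep the optimization trace.
trace = g_ascent.deepdream('great_grey_owl.jpg',
                           model.features[33],
                           filter_idx=94,
                           num_iter=20,
                           return_output=True)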
Example #5
def test_standardize_and_clip_mono_channel_tensor():
    default_min = 0.0
    default_max = 1.0

    input_ = torch.randint(low=-1000, high=1000, size=(1, 224, 224)).float()
    normalized = standardize_and_clip(input_)

    assert normalized.shape == input_.shape
    assert normalized.min().item() >= default_min
    assert normalized.max().item() <= default_max
Example #6
    def _visualize_filter(self, layer, filter_idx, num_iter, figsize, title):
        self.output = self.optimize(layer, filter_idx, num_iter=num_iter)

        plt.figure(figsize=figsize)
        plt.axis('off')
        plt.title(title)

        plt.imshow(format_for_plotting(
            standardize_and_clip(self.output[-1],
                                 saturation=0.15,
                                 brightness=0.7)))
Example #7
def test_standardize_and_clip_add_epsilon_when_std_is_zero():
    default_min = 0.0
    default_max = 1.0

    input_ = torch.zeros(1, 244, 244)
    normalized = standardize_and_clip(input_)

    assert normalized.shape == input_.shape

    for channel in normalized:
        assert channel.min().item() >= default_min
        assert channel.max().item() <= default_max
Example #8
def test_standardize_and_clip_with_custom_min_max():
    custom_min = 2.0
    custom_max = 3.0

    input_ = torch.randint(low=-1000, high=1000, size=(224, 224)).float()
    normalized = standardize_and_clip(input_,
                                      min_value=custom_min,
                                      max_value=custom_max)

    assert normalized.shape == input_.shape
    assert normalized.min() >= custom_min
    assert normalized.max() <= custom_max
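
The four standardize_and_clip tests above pin down its contract: the output is detached from the autograd graph, keeps the input shape, survives a zero-std input, and lands inside [min_value, max_value]. Below is a minimal sketch that would satisfy them; the epsilon and the saturation/brightness rescaling are assumptions, not the library's actual implementation.

import torch

def standardize_and_clip(tensor, min_value=0.0, max_value=1.0,
                         saturation=0.1, brightness=0.5, eps=1e-7):
    # Detach so the result never carries autograd history.
    tensor = tensor.detach()

    # Guard against a zero standard deviation (e.g. an all-zeros input).
    std = tensor.std()
    if std == 0:
        std = std + eps

    # Standardize, rescale around `brightness`, and clip into range.
    standardized = tensor.sub(tensor.mean()).div(std).mul(saturation)
    return standardized.add(brightness).clamp(min_value, max_value)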
Example #9
def get_input_gradient(model, fname, guided=True, take_max=False, use_gpu=True):
  '''Wrapper for computing the input-image gradient plus a quick visualization.

  Input:
    model: (__main__.Model) Model
    fname: (str) Image file path
    guided: (bool) If True, perform guided backpropagation
    take_max: (bool) If True, take the max gradient across colour channels
    use_gpu: (bool) Use the GPU if available

  Output:
    gradient for fname, formatted for plotting, plus the raw gradient tensor
  '''
  # Load the image file and backpropagate to the input
  img = load_img(fname)
  backprop = Backprop(model)
  output, target_class = backprop.calculate_gradients(img, None, take_max, guided, use_gpu)

  # Reshape the image gradient: drop the batch dim, move channels to the last dim
  x = output  # keep the raw (N, C, H, W) gradient for the caller
  output = format_for_plotting(output)
  clip_grad = standardize_and_clip(output)  # Clip the image gradient into [0, 1]
  
  print(f'Model predicted {target_class}')
  
  fig = plt.figure()
  ax = fig.add_subplot(1, 3, 1)
  ax.set_axis_off()
  ax.imshow(clip_grad)
  ax.set_title('Gradient')
  
  ax = fig.add_subplot(1, 3, 2)
  ax.set_axis_off()
  ax.imshow(format_for_plotting(img))
  ax.set_title('Original image')

  ax = fig.add_subplot(1, 3, 3)
  ax.set_axis_off()
  ax.imshow(format_for_plotting(img))
  ax.imshow(clip_grad, alpha=0.3)
  ax.set_title('Blend grad and\n original image')

  fig.show()
  return output, x
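
A hedged call sketch for get_input_gradient; the Model construction and the image path are placeholders.

# Hypothetical call; Model() and the path are assumptions for illustration.
model = Model()
model.eval()

plottable_grad, raw_grad = get_input_gradient(model, 'images/owl.jpg',
                                              guided=True)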
Example #10
    def _visualize_filter(self, layer, filter_idx, num_iter, figsize, title):
        self.output = self.optimize(layer, filter_idx, num_iter=num_iter)

        if self.plot:
            plt.figure(figsize=figsize)
            plt.axis('off')
            plt.title(title)

            temp = format_for_plotting(standardize_and_clip(self.output[-1],
                                        saturation=0.15,
                                        brightness=0.7))

            if temp.shape[0] == 3:
                plt.imshow(temp)
            else:
                fig, axes = plt.subplots(1, temp.shape[0])
                for i in range(temp.shape[0]):
                    axes[i].imshow(temp[i])
Example #11
def visualizeHeatmap(self,
                     input_,
                     target_class,
                     guided=False,
                     use_gpu=False,
                     cmap='viridis',
                     alpha=.5):

    # Calculate gradients
    max_gradients = self.calculate_gradients(
        input_,
        target_class,
        guided=guided,
        take_max=True,  # Collapse to the max across channels for a heatmap
        use_gpu=use_gpu)

    # Return two (image, cmap, alpha) entries; the heatmap is used for both
    output = (format_for_plotting(
        standardize_and_clip(max_gradients, saturation=1)), cmap, alpha)
    return [output, output]
Example #12
def visualizeOverlay(self,
                     input_,
                     denormalizedInput_,
                     target_class,
                     guided=False,
                     use_gpu=False,
                     cmap='viridis',
                     alpha=.5):

    # Calculate gradients
    max_gradients = self.calculate_gradients(input_,
                                             target_class,
                                             guided=guided,
                                             take_max=False,
                                             use_gpu=use_gpu)
    clipped_gradients = format_for_plotting(
        standardize_and_clip(max_gradients, saturation=1))
    # Return two entries of type (image, cmap, alpha)
    return [(format_for_plotting(denormalizedInput_), None, None),
            (clipped_gradients, cmap, alpha)]
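
Both visualizeHeatmap and visualizeOverlay return a list of (image, cmap, alpha) entries meant to be stacked onto one axis; below is a hedged rendering sketch, where `backprop` and the inputs are assumptions.

# Hypothetical renderer for the [(image, cmap, alpha), ...] convention above.
import matplotlib.pyplot as plt

layers = backprop.visualizeOverlay(input_, denormalized_input,
                                   target_class=24, guided=True)
plt.axis('off')
for image, cmap, alpha in layers:
    # Entries with cmap/alpha set to None fall back to matplotlib defaults.
    plt.imshow(image, cmap=cmap, alpha=alpha)
plt.show()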
Example #13
def visualizeAllClasses(self,
                        image_normalized,
                        listOfClasses,
                        guided=False,
                        use_gpu=False):
    w = image_normalized.shape[3]
    h = image_normalized.shape[2]
    colors = image_normalized.shape[1]

    # One column per class: tile the per-class gradients along the width
    result = torch.zeros(1, colors, h, w * len(listOfClasses))
    for idx, target_class in enumerate(listOfClasses):
        result[:, :, :, idx * w:(idx + 1) * w] = self.calculate_gradients(
            image_normalized,
            target_class,
            guided=guided,
            take_max=True,
            use_gpu=use_gpu)

    stdized = standardize_and_clip(result, saturation=0.4)

    return stdized
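
A hedged sketch of displaying the wide (1, C, H, W * num_classes) strip this variant returns; `backprop` and the class indices are assumptions.

# Hypothetical display of the strip of per-class gradients returned above.
import matplotlib.pyplot as plt

strip = backprop.visualizeAllClasses(image_normalized, [24, 130, 285],
                                     guided=True)
plt.figure(figsize=(12, 4))
plt.axis('off')
plt.imshow(format_for_plotting(strip))
plt.show()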
Example #14
    def visualize(self,
                  input_,
                  target_class,
                  guided=False,
                  use_gpu=False,
                  figsize=(16, 4),
                  cmap='viridis',
                  alpha=.5,
                  return_output=False):
        """Calculates gradients and visualizes the output.

        A method that combines the backprop operation and visualization.

        It also returns the gradients, if specified with `return_output=True`.

        Args:
            input_ (torch.Tensor): With shape :math:`(N, C, H, W)`.
            target_class (int, optional, default=None): The index of the
                target class.
            guided (bool, optional, default=False): If True, perform guided
                backpropagation. See `Striving for Simplicity: The All
                Convolutional Net <https://arxiv.org/pdf/1412.6806.pdf>`_.
            use_gpu (bool, optional, default=False): Use GPU if set to True and
                `torch.cuda.is_available()`.
            figsize (tuple, optional, default=(16, 4)): The size of the plot.
            cmap (str, optional, default='viridis'): The color map of the
                gradients plots. See available color maps `here <https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html>`_.
            alpha (float, optional, default=.5): The alpha value of the max
                gradients to be juxtaposed on top of the input image.
            return_output (bool, optional, default=False): Returns the
                output(s) of optimization if set to True.

        Returns:
            gradients (torch.Tensor): With shape :math:`(C, H, W)`, along with
                the max gradients, when `return_output` is set to True.
        """

        # Calculate gradients

        gradients = self.calculate_gradients(input_,
                                             target_class,
                                             guided=guided)
        max_gradients = self.calculate_gradients(input_,
                                                 target_class,
                                                 guided=guided,
                                                 take_max=True)

        # Setup subplots

        subplots = [
            # (title, [(image1, cmap, alpha), (image2, cmap, alpha)])
            ('Input image', [(format_for_plotting(denormalize(input_)), None,
                              None)]),
            ('Gradients across RGB channels',
             [(format_for_plotting(standardize_and_clip(gradients)), None,
               None)]),
            ('Max gradients',
             [(format_for_plotting(standardize_and_clip(max_gradients)), cmap,
               None)]),
            ('Overlay',
             [(format_for_plotting(denormalize(input_)), None, None),
              (format_for_plotting(standardize_and_clip(max_gradients)), cmap,
               alpha)])
        ]

        fig = plt.figure(figsize=figsize)

        for i, (title, images) in enumerate(subplots):
            ax = fig.add_subplot(1, len(subplots), i + 1)
            ax.set_axis_off()
            ax.set_title(title)

            for image, cmap, alpha in images:
                ax.imshow(image, cmap=cmap, alpha=alpha)

        if return_output:
            return gradients, max_gradients
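
A hedged end-to-end sketch for Backprop.visualize; the AlexNet model, image path, and target class are assumptions for illustration.

# Hypothetical end-to-end use; the model, file, and class index are made up.
import torchvision.models as models

model = models.alexnet(pretrained=True)
backprop = Backprop(model)

input_ = apply_transforms(load_image('great_grey_owl.jpg'))
gradients, max_gradients = backprop.visualize(input_,
                                              target_class=24,
                                              guided=True,
                                              return_output=True)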