Example #1
 def generate(self, filename):
     initial_learning_rate = 6
     for i in range(1, 150):
         # Process image and return variable
         self.processed_image = preprocess_image(self.created_image)
         # Define optimizer for the image
         optimizer = SGD([self.processed_image], lr=initial_learning_rate)
         # Forward
         output = self.model(self.processed_image)
         # Target specific class
         class_loss = -output[0, self.target_class]
         print('Iteration:', str(i), 'Loss:',
               "{0:.2f}".format(class_loss.data.numpy()[0]))
         # Zero grads
         self.model.zero_grad()
         # Backward
         class_loss.backward()
         # Update image
         optimizer.step()
         # Recreate image
         self.created_image = recreate_image(self.processed_image)
         # Save image
         cv2.imwrite(
             'generated/' + filename + 'c_specific_iteration_' + str(i) +
             '.jpg', self.created_image)
     return self.processed_image
 def dream(self):
     # Process image and return variable
     self.processed_image = preprocess_image(self.created_image, False)
     # Define optimizer for the image
     # Earlier layers need higher learning rates to visualize whereas later layers need less
     optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)
     for i in range(1, 251):
         optimizer.zero_grad()
         # Assign created image to a variable to move forward through the model
         x = self.processed_image
         for index, layer in enumerate(self.model):
             # Forward
             x = layer(x)
             # Only need to forward until the selected layer is reached
             if index == self.selected_layer:
                 break
         # Loss function is the mean of the output of the selected layer/filter
         # We try to minimize the mean of the output of that specific filter
         loss = torch.mean(self.conv_output)
         print('Iteration:', str(i), 'Loss:',
               "{0:.2f}".format(loss.data.numpy()[0]))
         # Backward
         loss.backward()
         # Update image
         optimizer.step()
         # Recreate image
         self.created_image = recreate_image(self.processed_image)
         # Save image every 20 iterations
         if i % 20 == 0:
             cv2.imwrite('generated/ddream_l' + str(self.selected_layer) +
                         '_f' + str(self.selected_filter) + '_iter' +
                         str(i) + '.jpg', self.created_image)
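
Both methods above rely on preprocess_image and recreate_image helpers that are not shown in this snippet. A minimal sketch of what they could look like, assuming standard ImageNet normalization and a resize flag as the second argument (the helpers in the source project may differ in detail):

import cv2
import numpy as np
import torch

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def preprocess_image(img, resize=True):
    # img: HxWx3 uint8 array -> 1x3xHxW float tensor that requires gradients
    if resize:
        img = cv2.resize(img, (224, 224))
    img = np.float32(img) / 255.0
    img = (img - IMAGENET_MEAN) / IMAGENET_STD
    img = np.ascontiguousarray(img.transpose(2, 0, 1))  # HWC -> CHW
    tensor = torch.from_numpy(img).unsqueeze(0)          # add batch dimension
    tensor.requires_grad_(True)
    return tensor

def recreate_image(tensor):
    # Inverse of preprocess_image: 1x3xHxW tensor -> HxWx3 uint8 array
    img = tensor.detach().cpu().numpy()[0].transpose(1, 2, 0)
    img = img * IMAGENET_STD + IMAGENET_MEAN
    return np.clip(img * 255.0, 0, 255).astype(np.uint8)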
Example #3
    def generate_inverted_image_specific_layer(self,
                                               input_image,
                                               img_size,
                                               filename,
                                               target_layer=3):
        # Generate a random image which we will optimize
        opt_img = Variable(1e-1 * torch.randn(1, 3, img_size, img_size),
                           requires_grad=True)
        # Define optimizer for previously created image
        optimizer = SGD([opt_img], lr=1e4, momentum=0.9)
        # Get the output from the model after a forward pass until target_layer
        # with the input image (real image, NOT the randomly generated one)
        input_image_layer_output = \
            self.get_output_from_specific_layer(input_image, target_layer)

        # Alpha regularization parameters
        # Parameter alpha, which is actually the sixth norm
        alpha_reg_alpha = 6
        # The multiplier, lambda alpha
        alpha_reg_lambda = 1e-7

        # Total variation regularization parameters
        # Parameter beta, which is actually the second norm
        tv_reg_beta = 2
        # The multiplier, lambda beta
        tv_reg_lambda = 1e-8

        for i in range(201):
            optimizer.zero_grad()
            # Get the output from the model after a forward pass until target_layer
            # with the generated image (randomly generated one, NOT the real image)
            output = self.get_output_from_specific_layer(opt_img, target_layer)
            # Calculate Euclidean loss
            euc_loss = 1e-1 * self.euclidian_loss(
                input_image_layer_output.detach(), output)
            # Calculate alpha regularization
            reg_alpha = alpha_reg_lambda * self.alpha_norm(
                opt_img, alpha_reg_alpha)
            # Calculate total variation regularization
            reg_total_variation = tv_reg_lambda * self.total_variation_norm(
                opt_img, tv_reg_beta)
            # Sum all to optimize
            loss = euc_loss + reg_alpha + reg_total_variation
            # Step
            loss.backward()
            optimizer.step()
            # Generate image every 5 iterations
            if i % 5 == 0:
                print('Iteration:', str(i), 'Loss:', loss.data.numpy()[0])
                x = recreate_image(opt_img)
                cv2.imwrite(
                    'generated/' + filename + '_Inv_Image_Layer_' +
                    str(target_layer) + '_Iteration_' + str(i) + '.jpg', x)
            # Reduce learning rate every 40 iterations
            if i % 40 == 0:
                for param_group in optimizer.param_groups:
                    param_group['lr'] *= 1 / 10
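
The method above calls self.euclidian_loss, self.alpha_norm and self.total_variation_norm, which are not shown here. Below is a sketch of plausible implementations, written as standalone functions and assuming they follow the regularizers from Mahendran and Vedaldi's "Understanding Deep Image Representations by Inverting Them"; the exact formulations in the source class may differ.

import torch

def alpha_norm(input_matrix, alpha):
    # Alpha-norm regularizer: sum of |x|^alpha over every value of the generated image
    return (input_matrix.view(-1).abs() ** alpha).sum()

def total_variation_norm(input_matrix, beta):
    # Total variation regularizer on a 1x3xHxW image tensor: penalizes large
    # differences between neighbouring pixels, raised to the power beta/2
    img = input_matrix[0]
    dh = img[:, 1:, :] - img[:, :-1, :]   # vertical differences
    dw = img[:, :, 1:] - img[:, :, :-1]   # horizontal differences
    return ((dh[:, :, :-1] ** 2 + dw[:, :-1, :] ** 2) ** (beta / 2)).sum()

def euclidian_loss(org_matrix, target_matrix):
    # Squared Euclidean distance between two feature maps,
    # normalized by the norm of the reference representation
    return ((org_matrix - target_matrix) ** 2).sum() / (org_matrix ** 2).sum()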
Example #4
 def visualise_layer_without_hooks(self):
     # Process image and return variable
     self.processed_image = preprocess_image(self.created_image)
     # Define optimizer for the image
     # Earlier layers need higher learning rates to visualize whereas later layers need less
     optimizer = SGD([self.processed_image], lr=5, weight_decay=1e-6)
     for i in range(1, 51):
         optimizer.zero_grad()
         # Assign created image to a variable to move forward through the model
         x = self.processed_image
         for index, layer in enumerate(self.model):
             # Forward pass layer by layer
             x = layer(x)
             if index == self.selected_layer:
                 # Only need to forward until the selected layer is reached
                 # Now, x is the output of the selected layer
                 break
         # Here, we get the specific filter from the output of the convolution operation
         # x is a tensor of shape 1x512x28x28 (for layer 17)
         # So there are 512 unique filter outputs
         # Following line selects a filter from 512 filters so self.conv_output will become
         # a tensor of shape 28x28
         self.conv_output = x[0, self.selected_filter]
         # Loss function is the mean of the output of the selected layer/filter
         # We try to minimize the mean of the output of that specific filter
         loss = torch.mean(self.conv_output)
         print('Iteration:', str(i), 'Loss:',
               "{0:.2f}".format(loss.data.numpy()[0]))
         # Backward
         loss.backward()
         # Update image
         optimizer.step()
         # Recreate image
         self.created_image = recreate_image(self.processed_image)
         # Save image
         if i % 5 == 0:
             cv2.imwrite(
                 '../generated/layer_vis_l' + str(self.selected_layer) +
                 '_f' + str(self.selected_filter) + '_iter' + str(i) +
                 '.jpg', self.created_image)
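
For context, the same filter-visualization loop can be run end to end without the surrounding class. The sketch below is a self-contained variant; VGG16, the 224x224 input size, and maximizing the activation through the negative mean are illustration choices, not taken from the snippet above (which minimizes the positive mean).

import torch
import torchvision.models as models
from torch.optim import SGD

def visualise_filter(selected_layer=17, selected_filter=5, steps=50):
    # Optimize a random image so that the mean activation of one filter
    # in one layer of VGG16's feature extractor is maximized
    model = models.vgg16(pretrained=True).features.eval()
    image = torch.randn(1, 3, 224, 224, requires_grad=True)
    optimizer = SGD([image], lr=5, weight_decay=1e-6)
    for _ in range(steps):
        optimizer.zero_grad()
        x = image
        for index, layer in enumerate(model):
            x = layer(x)
            if index == selected_layer:
                break
        # Negative mean of the selected filter's feature map
        loss = -torch.mean(x[0, selected_filter])
        loss.backward()
        optimizer.step()
    return image.detach()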
Example #5
 def visualise_layer_with_hooks(self):
     # Hook the selected layer
     self.hook_layer()
     # Process image and return variable
     self.processed_image = preprocess_image(self.created_image)
     # Define optimizer for the image
     # Earlier layers need higher learning rates to visualize whereas later layers need less
     optimizer = SGD([self.processed_image], lr=5, weight_decay=1e-6)
     for i in range(1, 51):
         optimizer.zero_grad()
         # Assign created image to a variable to move forward through the model
         x = self.processed_image
         for index, layer in enumerate(self.model):
             # Forward pass layer by layer
             # x is not used after this point because it is only needed to trigger
             # the forward hook function
             x = layer(x)
             # Only need to forward until the selected layer is reached
             if index == self.selected_layer:
                 # (forward hook function triggered)
                 break
         # Loss function is the mean of the output of the selected layer/filter
         # We try to minimize the mean of the output of that specific filter
         loss = torch.mean(self.conv_output)
         print('Iteration:', str(i), 'Loss:',
               "{0:.2f}".format(loss.data.numpy()[0]))
         # Backward
         loss.backward()
         # Update image
         optimizer.step()
         # Recreate image
         self.created_image = recreate_image(self.processed_image)
         # Save image
         if i % 5 == 0:
             cv2.imwrite(
                 '../generated/layer_vis_l' + str(self.selected_layer) +
                 '_f' + str(self.selected_filter) + '_iter' + str(i) +
                 '.jpg', self.created_image)
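
The with-hooks variant depends on self.hook_layer() to populate self.conv_output during the forward pass. The runnable sketch below shows that mechanism in isolation: a forward hook on the selected layer stores the selected filter's feature map each time the layer runs; in the class above, hook_layer() presumably registers an equivalent hook that writes to self.conv_output. VGG16, layer 17 and filter 5 are illustration choices.

import torch
import torchvision.models as models

model = models.vgg16(pretrained=True).features.eval()
selected_layer, selected_filter = 17, 5
conv_output = {}

def hook_function(module, layer_in, layer_out):
    # layer_out has shape 1xCxHxW; keep only the selected filter's map
    conv_output['value'] = layer_out[0, selected_filter]

# Register the forward hook on the selected layer
model[selected_layer].register_forward_hook(hook_function)
_ = model(torch.randn(1, 3, 224, 224))
print(conv_output['value'].shape)  # torch.Size([28, 28])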
Example #6
    def dream(self,
              device,
              affine_size=4,
              is_3d=False,
              update_count=250,
              is_depth_first=False):
        # Process image and return variable
        if type(self.created_image) is not np.ndarray:
            self.created_image = np.asarray(self.created_image)
        # Convert to a float tensor and normalize (single-value ImageNet mean/std)
        self.processed_image = torch.from_numpy(self.created_image).float()
        self.processed_image /= 255
        self.processed_image -= 0.485
        self.processed_image /= 0.229

        self.processed_image.requires_grad = True

        # Define optimizer for the image
        # Earlier layers need higher learning rates to visualize whereas later layers need less
        optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)
        for i in range(1, 251):
            optimizer.zero_grad()
            # Assign created image to a variable to move forward through the model
            x = self.processed_image.to(device)
            for index, layer in self.model._modules.items():
                # Flatten the feature map before the fully connected layer
                if index == 'fc':
                    x = x.flatten(1)
                # Forward
                x = layer(x)
                # Only need to forward until the selected layer is reached
                if index == self.selected_layer:
                    break
            # Loss is the negative mean of the output of the selected layer/filter,
            # so minimizing it maximizes the mean activation of that specific filter
            loss = -torch.mean(self.conv_output)
            #print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.cpu().numpy()))
            # Backward
            loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            if not is_3d and self.method is None:
                self.created_image = recreate_image(self.processed_image)
            else:
                self.created_image = self.processed_image
            # Save image every update_count iterations
            if i % update_count == 0:
                #print(self.created_image.shape)
                if not is_3d:
                    im_path = self.im_path + "CNN_Vis_DeepDream_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.png'
                    save_image(self.created_image, im_path)
                else:
                    im_path = self.im_path + "CNN_Vis_DeepDream_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.nii.gz'
                    grads = self.created_image.squeeze().cpu().detach().numpy()
                    if is_depth_first:
                        grads = np.moveaxis(grads, 0, -1)

                    img = nb.Nifti1Image(grads, np.eye(affine_size))
                    nb.save(img, im_path)
    def visualise_layer_with_hooks(self,
                                   device,
                                   opPath,
                                   is_3d=False,
                                   size=(224, 224, 3),
                                   affine_size=4,
                                   isDepthFirst=False):
        # Hook the selected layer
        self.hook_layer()

        # Generate a random image
        random_image = np.uint8(np.random.uniform(150, 180, size))
        # Process image and return variable
        if not is_3d:
            processed_image = preprocess_image(random_image, False)
        else:
            processed_image = torch.from_numpy(random_image).float()
            # The image itself is optimized, so it must require gradients
            processed_image.requires_grad = True
        # Define optimizer for the image
        optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
        for i in range(1, 31):
            optimizer.zero_grad()
            # Assign created image to a variable to move forward through the model
            x = processed_image.to(device)
            for index, layer in self.model._modules.items():
                # Flatten the feature map before the fully connected layer
                if index == 'fc':
                    x = x.flatten(1)
                # Forward pass layer by layer
                # x is not used after this point because it is only needed to trigger
                # the forward hook function
                x = layer(x)
                # Only need to forward until the selected layer is reached
                if index == self.selected_layer:
                    # (forward hook function triggered)
                    break
            # Loss is the negative mean of the output of the selected layer/filter,
            # so minimizing it maximizes the mean activation of that specific filter
            # print(self.conv_output)
            loss = -torch.mean(self.conv_output)
            #print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.cpu().numpy()))
            # Backward
            loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            if not is_3d:
                self.created_image = recreate_image(processed_image)
            else:
                self.created_image = processed_image
            # Save image
            if i % 30 == 0:
                if not is_3d:
                    im_path = opPath + "Layer_Visualization_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.png'
                    save_image(self.created_image, im_path)
                else:
                    im_path = opPath + "Layer_Visualization_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.nii.gz'
                    grads = self.created_image.squeeze().cpu().detach().numpy()
                    if isDepthFirst:
                        grads = np.moveaxis(grads, 0, -1)
                    img = nb.Nifti1Image(grads, np.eye(affine_size))
                    nb.save(img, im_path)
    def visualise_layer_without_hooks(self,
                                      device,
                                      opPath,
                                      is_3d=False,
                                      size=(224, 224, 3),
                                      affine_size=4,
                                      isDepthFirst=False):
        # Process image and return variable
        # Generate a random image
        random_image = np.uint8(np.random.uniform(150, 180, size))
        # Process image and return variable
        if not is_3d:
            processed_image = preprocess_image(random_image, False)
        else:
            processed_image = torch.from_numpy(random_image).float()
            # The image itself is optimized, so it must require gradients
            processed_image.requires_grad = True

        # Define optimizer for the image
        optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
        for i in range(1, 31):
            optimizer.zero_grad()
            # Assign created image to a variable to move forward through the model
            x = processed_image.to(device)
            for index, layer in self.model._modules.items():
                # Flatten the feature map before the fully connected layer
                if index == 'fc':
                    x = x.flatten(1)
                # Forward pass layer by layer
                x = layer(x)
                if index == self.selected_layer:
                    # Only need to forward until the selected layer is reached
                    # Now, x is the output of the selected layer
                    break
            # Here, we get the specific filter from the output of the convolution operation
            # x is a tensor of shape 1x512x28x28 (for layer 17)
            # So there are 512 unique filter outputs
            # Following line selects a filter from 512 filters so self.conv_output will become
            # a tensor of shape 28x28
            self.conv_output = x[0, self.selected_filter]
            # Loss is the negative mean of the output of the selected layer/filter,
            # so minimizing it maximizes the mean activation of that specific filter
            loss = -torch.mean(self.conv_output)
            #print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.cpu().numpy()))
            # Backward
            loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            if not is_3d:
                self.created_image = recreate_image(processed_image)
            else:
                self.created_image = processed_image
            # Save image
            if i % 30 == 0:
                if not is_3d:
                    im_path = opPath + "Layer_Visualization_no_hook_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.png'
                    save_image(self.created_image, im_path)
                else:
                    im_path = opPath + "Layer_Visualization_no_hook_" + str(self.selected_layer) + \
                        '_f' + str(self.selected_filter) + '_iter' + str(i) + '.nii.gz'
                    grads = self.created_image.squeeze().cpu().detach().numpy()
                    if isDepthFirst:
                        grads = np.moveaxis(grads, 0, -1)
                    img = nb.Nifti1Image(grads, np.eye(affine_size))
                    nb.save(img, im_path)
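
This 3D-aware variant assumes several imports that are not shown in the snippet. The list below is inferred from usage; the nb alias for nibabel and the use of torchvision's save_image are assumptions, and the source project may import or alias them differently (preprocess_image and recreate_image are likewise assumed to come from the project's own helper module).

import numpy as np
import torch
import nibabel as nb                       # used to write the .nii.gz volumes
from torch.optim import SGD, Adam          # optimizers used to update the image
from torchvision.utils import save_image   # used to write the .png visualizations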