def generate(self):
    for i in range(1, 200):
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image)
        # Define optimizer for the image
        optimizer = SGD([self.processed_image], lr=6)
        # Forward
        output = self.model(self.processed_image)
        # Get confidence from softmax
        target_confidence = functional.softmax(output, dim=1)[0][self.target_class].data.numpy()
        if target_confidence > self.minimum_confidence:
            # Read the saved image back and push it through the model to confirm
            # the prediction: the preprocessed image is float, but it is written
            # to file as uint8, so some precision may be lost in the round trip
            confirmation_image = cv2.imread(
                'generated/fooling_sample_class_' +
                str(self.target_class) + '.jpg', 1)
            # Preprocess image
            confirmation_processed_image = preprocess_image(confirmation_image)
            # Get prediction
            confirmation_output = self.model(confirmation_processed_image)
            # Get confidence
            softmax_confirmation = \
                functional.softmax(confirmation_output, dim=1)[0][self.target_class].data.numpy()
            if softmax_confirmation > self.minimum_confidence:
                print('Generated fooling image with',
                      "{0:.2f}".format(softmax_confirmation),
                      'confidence at iteration', str(i) + '.')
                break
        # Target specific class
        class_loss = -output[0, self.target_class]
        print('Iteration:', str(i), 'Loss',
              "{0:.2f}".format(class_loss.data.numpy()))
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image
        cv2.imwrite(
            'generated/fooling_sample_class_' +
            str(self.target_class) + '.jpg', self.created_image)
    return self.processed_image
def dream(self):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image, False)
    # Define optimizer for the image
    # Earlier layers need higher learning rates to visualize, whereas later layers need less
    optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)
    for i in range(1, 251):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                break
        # Loss is the mean of the output of the selected layer/filter, which is
        # minimized here; note that the usual DeepDream objective negates it
        # (loss = -torch.mean(...)) so that the activation is amplified instead
        loss = torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image every 20 iterations
        if i % 20 == 0:
            cv2.imwrite(
                'generated/layer_vis_l' + str(self.selected_layer) +
                '_f' + str(self.selected_filter) +
                '_iter' + str(i) + '.jpg', self.created_image)
def preprocess_image_model(img_path_list):
    prep_img_list = []
    for img_path in img_path_list:
        input_image = Image.open(img_path).convert('RGB')
        prep_img = preprocess_image(input_image)
        prep_img_list.append(prep_img)
    return prep_img_list
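# NOTE: `preprocess_image` is used throughout this section but defined elsewhere
# (a misc helper in the repo these snippets come from). A minimal sketch of what
# it is assumed to do -- optionally resize, scale to [0, 1], ImageNet-normalize,
# and return a batched tensor that requires gradients so an optimizer can update
# it. The name and defaults here are illustrative, not the repo's exact helper.
import cv2
import numpy as np
import torch

def preprocess_image_sketch(im_as_arr, resize_im=True):
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    im_as_arr = np.array(im_as_arr)           # accept PIL images as well
    if resize_im:
        im_as_arr = cv2.resize(im_as_arr, (224, 224))
    im_as_arr = np.float32(im_as_arr) / 255
    im_as_arr = im_as_arr.transpose(2, 0, 1)  # HWC -> CHW
    for channel in range(3):
        im_as_arr[channel] -= mean[channel]
        im_as_arr[channel] /= std[channel]
    im_as_ten = torch.from_numpy(im_as_arr).float().unsqueeze(0)
    im_as_ten.requires_grad_(True)
    return im_as_ten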
def visualise_layer_with_hooks(self):
    # Hook the selected layer
    self.hook_layer()
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image)
    # Define optimizer for the image
    optimizer = Adam([self.processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 31):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image
        cv2.imwrite(
            "/home/abdullah/Documents/Layers visualization/results/{}.jpg".format(i),
            self.created_image)
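# NOTE: `hook_layer`, called above and in several functions below, is not shown
# in this section. It is assumed to register a forward hook on the selected
# layer that stores that layer's output for the selected filter in
# self.conv_output, roughly like this sketch (the method name and attributes
# mirror the calls above; the body is an assumption):
def hook_layer_sketch(self):
    def hook_function(module, layer_in, layer_out):
        # Store the conv output of the selected filter (from the selected layer)
        self.conv_output = layer_out[0, self.selected_filter]
    self.model[self.selected_layer].register_forward_hook(hook_function)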
def visualise_layer_without_hooks(self):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image)
    # Define optimizer for the image
    optimizer = Adam([self.processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 31):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            x = layer(x)
            if index == self.selected_layer:
                # Only need to forward until the selected layer is reached
                # Now, x is the output of the selected layer
                break
        # Here, we get the specific filter from the output of the convolution.
        # x has shape 1x512x28x28 (for layer 17), i.e. 512 filter outputs; the
        # following line selects one, so self.conv_output has shape 28x28
        self.conv_output = x[0, self.selected_filter]
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
    return self.created_image
def generate(self):
    initial_learning_rate = 6
    for i in range(1, 150):
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image)
        # Define optimizer for the image
        optimizer = SGD([self.processed_image], lr=initial_learning_rate)
        # Forward
        output = self.model(self.processed_image)
        # Target specific class
        class_loss = -output[0, self.target_class]
        print('Iteration:', str(i), 'Loss', "{0:.2f}".format(class_loss.data.numpy()))
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image
        cv2.imwrite('../generated/c_specific_iteration_' + str(i) + '.jpg',
                    self.created_image)
    return self.processed_image
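# NOTE: `recreate_image` (used above and throughout) is the assumed inverse of
# `preprocess_image`: it un-normalizes the optimized tensor, clips to [0, 1],
# and returns a uint8 HWC array suitable for cv2.imwrite / save_image. A
# minimal sketch (names illustrative):
import numpy as np

def recreate_image_sketch(im_as_var):
    reverse_mean = [-0.485, -0.456, -0.406]
    reverse_std = [1 / 0.229, 1 / 0.224, 1 / 0.225]
    recreated_im = im_as_var.data.numpy()[0].copy()
    for c in range(3):
        recreated_im[c] /= reverse_std[c]
        recreated_im[c] -= reverse_mean[c]
    recreated_im[recreated_im > 1] = 1
    recreated_im[recreated_im < 0] = 0
    recreated_im = np.uint8(np.round(recreated_im * 255))
    return recreated_im.transpose(1, 2, 0)  # CHW -> HWC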
def visualise_layer_with_hooks(self):
    # Hook the selected layer
    self.hook_layer()
    # Load a starting image (here a validation sample instead of random noise)
    random_image = plt.imread("../data/valid/1/0000.jpg")
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 31):
        optimizer.zero_grad()
        # Push the image through the first conv layer to trigger the hook
        x = processed_image.to(device)
        x = self.model.conv1(x)
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.cpu().numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
        # Save image on the last iteration
        if i == 30:
            im_path = '../generated/layer_vis_1' + \
                '_f' + str(self.selected_filter) + '.jpg'
            save_image(self.created_image, im_path)
def dream(self):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image, True)
    # Define optimizer for the image
    # Earlier layers need higher learning rates to visualize, whereas later layers need less
    optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)
    for i in range(1, 251):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save the final image
        if i == 250:
            print(self.created_image.shape)
            if not os.path.exists('../result/generated/deep_dream/' + self.save_folder):
                os.makedirs('../result/generated/deep_dream/' + self.save_folder)
            im_path = '../result/generated/deep_dream/' + self.save_folder + '/' + \
                str(self.selected_layer) + '_f' + str(self.selected_filter) + \
                '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def generate(self, iterations=150):
    """Generates class specific image

    Keyword Arguments:
        iterations {int} -- Total iterations for gradient ascent (default: {150})

    Returns:
        np.ndarray -- Final maximally activated class image
    """
    initial_learning_rate = 6
    for i in range(1, iterations):
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image, False)
        # Define optimizer for the image
        optimizer = SGD([self.processed_image], lr=initial_learning_rate)
        # Forward
        output = self.model(self.processed_image)
        # Target specific class
        class_loss = -output[0, self.target_class]
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
    return self.created_image
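# Example usage of `generate` above (a sketch; the wrapping class name and its
# constructor are assumptions based on how the method reads self.model,
# self.target_class and self.created_image):
# from torchvision import models
# pretrained_model = models.alexnet(pretrained=True)
# csig = ClassSpecificImageGeneration(pretrained_model, target_class=130)
# final_image = csig.generate(iterations=150)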
def vis_grad(model, class_index, layer, image_path, size=[224, 224]):
    original_image = cv2.imread(image_path, 1)
    prep_img = preprocess_image(original_image, size)
    file_name_to_export = 'model' + '_classindex_' + str(class_index) + '-layer_' + str(layer)
    # Grad cam
    gcv2 = GradCam(model, target_layer=layer)
    # Generate cam mask
    cam = gcv2.generate_cam(prep_img, class_index, size)
    print('Grad cam completed')
    # Guided backprop
    GBP = GuidedBackprop(model)
    # Get gradients
    guided_grads = GBP.generate_gradients(prep_img, class_index)
    print('Guided backpropagation completed')
    # Guided Grad cam
    cam_gb = guided_grad_cam(cam, guided_grads)
    # save_gradient_images(cam_gb, file_name_to_export + '_GGrad_Cam')
    grayscale_cam_gb = convert_to_grayscale(cam_gb)
    # save_gradient_images(grayscale_cam_gb, file_name_to_export + '_GGrad_Cam_gray')
    print('Guided grad cam completed')
    cam_gb = trans(cam_gb)
    grayscale_cam_gb = trans(grayscale_cam_gb)
    return cam_gb, grayscale_cam_gb
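# Example call for `vis_grad` (a sketch; the class index, layer index, and
# image path are placeholders, and `model` is any CNN that the GradCam and
# GuidedBackprop wrappers above accept):
# cam_gb, grayscale_cam_gb = vis_grad(model, class_index=243, layer=11,
#                                     image_path='input.jpg', size=[224, 224])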
def visualise_layer_with_hooks(self):
    # The forward hook is not needed here because conv_output is taken
    # directly from x below
    # self.hook_layer()
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image)
    cv2.imwrite(
        './generated/input_l' + str(self.selected_layer) +
        '_f' + str(self.selected_filter) + '.jpg', self.created_image)
    # Define optimizer for the image
    optimizer = Adam([self.processed_image], lr=0.1, weight_decay=1e-6)
    """
    Alternative SGD optimizer, kept for reference:
    learning_rate = 0.001
    batch_size = 16
    momentum = 0.9
    decay = 0.0005
    optimizer = optim.SGD([self.processed_image],
                          lr=learning_rate / batch_size,
                          momentum=momentum, dampening=0,
                          weight_decay=decay * batch_size)
    """
    for i in range(1, 31):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                break
        # Select the filter output directly (this replaces the forward hook)
        self.conv_output = x[0, self.selected_filter]
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        # print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image on the last iteration
        if i % 30 == 0:
            cv2.imwrite(
                './generated/output_l' + str(self.selected_layer) +
                '_f' + str(self.selected_filter) + '.jpg', self.created_image)
def visualise_layer_with_hooks(self, model_name, n_step, outdir, unit=-10):
    '''
    Find the optimal stimulus for this unit

    Parameters:
    --------------
    model_name[str]: name of the model
    n_step[int]: number of training steps
    outdir[str]: directory to put results into
    unit[int]: the unit number (-10 means the whole filter)

    Returns:
    --------------
    created_image_unit: the image of the optimal stimulus for one unit
    '''
    # Hook the selected layer
    self.hook_layer()
    # Generate a random image (both branches currently use the same size)
    if model_name == 'alexnet':
        random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
    else:
        random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, n_step):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss: either the negative mean over the whole filter, or the negative
        # activation of a single spatial unit within it
        if unit == -10:
            loss = -torch.mean(self.conv_output)
        else:
            row = unit // self.conv_output.shape[0]
            column = unit - row * self.conv_output.shape[0]
            loss = -self.conv_output[row][column]
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(-(loss.data.numpy())))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
def generate(self):
    initial_learning_rate = self.ilr
    for i in range(1, self.maximumInternalIterations + 1):
        if i % (self.maximumInternalIterations / 10) == 0:
            print("Iteration", i)
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image,
                                                mean=self.mean, std=self.std)
        # Define optimizer for the image (X and ILR are module-level globals here)
        optimizer = SGD([self.processed_image],
                        lr=initial_learning_rate, weight_decay=X)
        # Forward pass
        output = self.model(self.processed_image.cuda())
        # Target specific class
        class_loss = -output[0, self.target_class]
        """
        L1 norm code, kept for reference:
        l1_crit = nn.L1Loss(size_average=False)
        reg_loss = 0
        for param in self.model.parameters():
            target = Variable(torch.from_numpy(np.zeros(param.shape).astype(dtype=np.float64))).float()
            reg_loss += l1_crit(param, target.cuda())
        factor = .0005
        class_loss += factor * reg_loss
        """
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image,
                                            mean=self.mean, std=self.std)
        # Save the final image
        if i == self.maximumInternalIterations:
            cv2.imwrite(
                '../generated/' + self.n +
                '_initialLearningrate_' + str(ILR) +
                '_weightDecay_' + str(X) +
                '_ClassSpecificImageGeneration_class_' + str(self.target_class) +
                '_ir_' + str(self.iter) +
                'iteration_' + str(i) + '.jpg',
                self.created_image[0])
    return self.processed_image
def visualise_layer_without_hooks(self):
    # Build the input clip: either frames from a given video or random noise
    tmp = torch.from_numpy(
        np.random.uniform(150, 180, (1, 3, 32, 112, 112))).float()
    for i in range(self.frame_num):
        if self.video is not None:
            random_image = self.video[i, ...]
        else:
            random_image = np.uint8(np.random.uniform(150, 180, (112, 112, 3)))
        # Process image and return variable
        processed_image = preprocess_image(random_image, False)
        tmp[0, :, i, ...] = processed_image[0, ...]
    # Move to the device first so the optimized tensor stays a leaf
    processed_image = tmp.to(self.device).requires_grad_(True)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, self.iter_num + 1):
        optimizer.zero_grad()
        # Assign created clip to a variable to move forward in the model
        x = processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            x = layer(x)
            if isinstance(layer, nn.MaxPool3d):
                # The pooling layers here return (output, indices); keep the output
                x = x[0]
            if index == self.selected_layer:
                # Only need to forward until the selected layer is reached
                # Now, x is the output of the selected layer
                break
        # Select one filter from the output of the selected layer; for a
        # 1xCxTxHxW output this gives that filter's spatio-temporal activation
        self.conv_output = x[0, self.selected_filter]
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.cpu().numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Save every frame of the clip on the last iteration
        if i % self.iter_num == 0:
            for j in range(self.frame_num):
                out_img = processed_image[:, :, j, ...]
                self.created_image = recreate_image(out_img)
                # Save image
                im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                    '_f' + str(self.selected_filter) + '_frame' + str(j) + '.jpg'
                save_image(self.created_image, im_path)
def visualise_layer_without_hooks(self, plots_path):
    # Generate a random image
    random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.05, weight_decay=1e-6)
    for i in range(1, 101):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = processed_image
        kl = 0
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer through the probabilistic conv layers
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl = layer.convprobforward(x)
                kl += _kl
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                break
        # Select the chosen filter's output at the selected conv layer; e.g. for
        # a 1x512x28x28 output this makes self.conv_output a 28x28 tensor
        conv = 0
        for index, layer in enumerate(self.model):
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                if conv == self.selected_layer:
                    self.conv_output = x[0, self.selected_filter]
                    break
                conv += 1
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
        # Save image every 5 iterations
        if i % 5 == 0:
            im_path = plots_path + 'layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def visualise_layer_without_hooks(self, optim=None, iterations=30, save_only_last=True):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image, self.mean, self.std, False)
    # Define optimizer for the image
    if optim is None:
        optimizer = Adam([self.processed_image], lr=0.1, weight_decay=1e-6)
    else:
        optimizer = optim
    for i in range(1, iterations + 1):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.layers):
            # Forward pass layer by layer
            x = layer(x)
            if index == self.selected_layer:
                # Only need to forward until the selected layer is reached
                # Now, x is the output of the selected layer
                break
        # Here, we get the specific filter from the output of the convolution.
        # x has shape 1x512x28x28 (for layer 17), i.e. 512 filter outputs; the
        # following line selects one, so self.conv_output has shape 28x28
        self.conv_output = x[0, self.selected_filter]
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        if i % 10 == 0:
            print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image, self.mean, self.std)
        # Save image
        if not save_only_last:
            if i % 5 == 0:
                im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                    '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
                save_image(self.created_image, im_path)
        elif i == iterations:
            im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def visualise_layer_with_hooks(self, optim=None, iterations=30, save_only_last=True):
    # Hook the selected layer
    self.hook_layer()
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image, self.mean, self.std, False)
    # Define optimizer for the image
    if optim is None:
        optimizer = Adam([self.processed_image], lr=0.1, weight_decay=1e-6)
    else:
        optimizer = optim
    for i in range(1, iterations + 1):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.layers):
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        if i % 10 == 0:
            print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image, self.mean, self.std)
        # Save image
        if not save_only_last:
            if i % 5 == 0:
                im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                    '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
                save_image(self.created_image, im_path)
        elif i == iterations:
            im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def deconv_visualization(model, pic, png_dir, demode):
    # Add a batch dimension and move to GPU
    pic = pic[None, :, :, :].cuda()
    x = model(pic)
    # Back to a HWC numpy array in [0, 1]
    x = x.cpu().detach().numpy().squeeze(0)
    x = np.transpose(x, (1, 2, 0))
    x = normalization(x)
    # Round-trip through preprocess/recreate to get a saveable uint8 image
    x = preprocess_image(x, resize_im=False)
    x = recreate_image(x)
    if not os.path.exists('./deconv/' + png_dir):
        os.makedirs('./deconv/' + png_dir)
    im_path = './deconv/' + png_dir + '/layer_vis_' + str(demode) + '.jpg'
    save_image(x, im_path)
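# NOTE: `save_image` in these snippets is assumed to be the repo's own helper
# rather than torchvision.utils.save_image: it takes a uint8 HWC numpy array
# (or a PIL image) and writes it to disk. A minimal sketch:
import numpy as np
from PIL import Image

def save_image_sketch(im, path):
    if isinstance(im, np.ndarray):
        im = Image.fromarray(im)
    im.save(path)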
def filter_visualization(model, selected_layer, selected_filter, png_dir):
    for name, param in model.named_parameters():
        if name == selected_layer + '.weight':
            # Pull out the weights of the selected filter as a HWC array
            x = param[selected_filter, :, :, :]
            x = x.cpu().detach().numpy().transpose(1, 2, 0)
            x = normalization(x)
            # Round-trip through preprocess/recreate to get a saveable uint8 image
            x = preprocess_image(x, resize_im=False)
            x = recreate_image(x)
            if not os.path.exists('./filter/' + png_dir):
                os.makedirs('./filter/' + png_dir)
            im_path = './filter/' + png_dir + '/layer_vis_' + str(selected_layer) + \
                '_f' + str(selected_filter) + '.jpg'
            save_image(x, im_path)
def construct_negative_samples(model, itr):
    assert itr > 0
    print('Adding negative samples ...')
    # Switch to evaluate mode
    model.eval()
    target_class = 1  # NI
    alpha = 2. / 255
    itr_max = 200
    mask = 0.96
    grad_type = 'random'
    # Add negative samples
    pn_num = 0
    total_itr = 0
    max_itr = 0
    # image_name and img_des_dir come from the enclosing module
    for line in image_name:
        img_path = os.path.join(img_des_dir, line)
        img = pil_loader(img_path)
        img_color = preprocess_image(img).cuda()
        # Forward pass
        with torch.no_grad():
            output = model(img_color)
            # Get the index of the max log-probability
            init_pred = output.max(1, keepdim=True)[1]
        if init_pred.cpu().item() == target_class:
            continue
        prob_threshold = [0.5, 0.3]
        negative_samples, itrTmp = IMGSM(model, img, target_class, alpha,
                                         itr_max, prob_threshold, mask, grad_type)
        if negative_samples.width > 233 or negative_samples.height > 233:
            continue
        filename, file_extension = os.path.splitext(line)
        new_image_name = filename + '-cgadvsam-' + str(itr) + '.png'
        negative_samples.save(os.path.join(img_des_dir, new_image_name))
        pn_num += 1
        total_itr += itrTmp
        max_itr = max(max_itr, itrTmp)
    print('The number of samples: ', pn_num)
    print('Average iterations for sample generation: ', total_itr / pn_num)
    return max_itr
def visualise_layer_with_hooks(self, plots_path):
    # Hook the selected layer
    self.hook_layer()
    # Generate a random image
    random_image = np.uint8(np.random.uniform(150, 180, (227, 227, 3)))
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 101):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = processed_image
        kl = 0
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer through the probabilistic conv layers;
            # x is only needed to trigger the forward hook function
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl = layer.convprobforward(x)
                kl += _kl
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation. (A variational loss
        # such as GaussianVariationalInference(CrossEntropyLoss())(outputs, y, kl, beta)
        # would need labelled outputs, which are not available here.)
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
        # Save image every 5 iterations
        if i % 5 == 0:
            im_path = plots_path + 'layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            print('saving image at: ' + im_path)
            save_image(self.created_image, im_path)
def vis_layer(encoder, decoder, pic, png_dir, demode=1, index=1):
    """Visualize a layer's deconv result, keeping only the strongest activations."""
    pic = pic[None, :, :, :].cuda()
    encoder_out, indices = encoder(pic)
    num_feat = encoder_out.shape[1]
    # Number of activations to keep per feature map
    if demode == 1:
        activation_num = (encoder_out.shape[2] * encoder_out.shape[3]) // 10
    else:
        activation_num = (encoder_out.shape[2] * encoder_out.shape[3]) // 2
    # Zero out all but the strongest activations in each feature map
    new_feat_map = encoder_out.clone()
    for i in range(num_feat):
        choose_map = new_feat_map[0, i, :, :]
        map_clone = choose_map.clone()
        new_map = torch.zeros(choose_map.shape, device='cuda')
        for j in range(activation_num):
            # Copy the current maximum into new_map, then erase it from map_clone
            activation = torch.max(map_clone)
            new_map = torch.where(map_clone == activation, map_clone, new_map)
            map_clone = torch.where(map_clone == activation,
                                    torch.zeros(map_clone.shape, device='cuda'),
                                    map_clone)
        new_feat_map[0, i, :, :] = new_map
    # Decode the sparsified feature map back to image space
    deconv_output = decoder(new_feat_map, indices)
    x = deconv_output.cpu().detach().numpy().squeeze(0)
    x = np.transpose(x, (1, 2, 0))
    x = normalization(x)
    x = preprocess_image(x, resize_im=False)
    x = recreate_image(x)
    if not os.path.exists('./deconv/' + png_dir):
        os.makedirs('./deconv/' + png_dir)
    im_path = './deconv/' + png_dir + '/layer_vis_' + str(demode) + '_' + str(index) + '.jpg'
    save_image(x, im_path)
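# Example call for `vis_layer` (a sketch; `encoder` must return (features,
# pooling indices) and `decoder` must accept them, as the code above assumes):
# vis_layer(encoder, decoder, pic, png_dir='conv1', demode=1, index=0)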
def generate(self):
    initial_learning_rate = self.lr
    loss = 10
    i = 0
    while loss >= self.min_loss:
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image, self.mean, self.std, False)
        if self.optim is None:
            optimizer = Adam([self.processed_image], lr=initial_learning_rate, weight_decay=1e-6)
        else:
            optimizer = self.optim
        # Forward
        output = self.model(self.processed_image)
        # Target specific class
        class_loss = -output[0, self.target_class]
        # Track the best (lowest) loss seen so far
        if loss > class_loss.data.numpy():
            loss = class_loss.data.numpy()
        if i % 25 == 0:
            print('Iteration:', str(i), 'Loss', "{0:.2f}".format(class_loss.data.numpy()))
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image, self.mean, self.std)
        # Save image every 25 iterations
        if i % 25 == 0:
            im_path = '../generated/' + str(self.name) + '_iteration_' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
        i += 1
    im_path = '../generated/' + str(self.name) + '_iteration_final.jpg'
    save_image(self.created_image, im_path)
    return self.processed_image
def vis_gradcam(model, class_index, layer, image_path, size=[224, 224]):
    original_image = cv2.imread(image_path, 1)
    prep_img = preprocess_image(original_image, size)
    file_name_to_export = 'model' + '_classindex_' + str(class_index) + '-layer_' + str(layer)
    # Grad cam
    gcv2 = GradCam(model, target_layer=layer)
    # Generate cam mask
    cam = gcv2.generate_cam(prep_img, class_index, size)
    print('Grad cam completed')
    # save_class_activation_on_image(original_image, cam, file_name_to_export)
    # Overlay the cam heatmap on the original image
    img_with_heatmap, activation_heatmap = act_on_img(original_image, cam, size)
    return cam, activation_heatmap, img_with_heatmap
def visualise_layer_without_hooks(self):
    # Generate a random image
    random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 31):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            x = layer(x)
            if index == self.selected_layer:
                # Only need to forward until the selected layer is reached
                # Now, x is the output of the selected layer
                break
        # Here, we get the specific filter from the output of the convolution.
        # x has shape 1x512x28x28 (for layer 17), i.e. 512 filter outputs; the
        # following line selects one, so self.conv_output has shape 28x28
        self.conv_output = x[0, self.selected_filter]
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
        # Save image on the last iteration
        if i % 30 == 0:
            im_path = self.path_to_output_files + '/layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def visualise_layer_without_hooks(self):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image)
    # Define optimizer for the image
    # Earlier layers need higher learning rates to visualize whereas later layers need less
    optimizer = SGD([self.processed_image], lr=6, weight_decay=1e-6)
    for i in range(1, 51):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            x = layer(x)
            if index == self.selected_layer:
                # Only need to forward until the selected layer is reached
                # Now, x is the output of the selected layer
                break
        # Here, we get the specific filter from the output of the convolution.
        # x has shape 1x512x28x28 (for layer 17), i.e. 512 filter outputs; the
        # following line selects one, so self.conv_output has shape 28x28
        self.conv_output = x[0, self.selected_filter]
        # Loss is the mean of the output of the selected layer/filter, which is
        # minimized here; negate it (-torch.mean) to maximize the activation
        # instead, as in the standard visualization
        loss = torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save and show image every 5 iterations
        if i % 5 == 0:
            cv2.imwrite('../generated/layer_vis_l' + str(self.selected_layer) +
                        '_f' + str(self.selected_filter) +
                        '_iter' + str(i) + '.jpg', self.created_image)
            plt.imshow(self.created_image)
            print(i)
            plt.show()
def visualise_layer_with_hooks(self):
    # Hook the selected layer
    self.hook_layer()
    # Generate a random image
    random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
    # random_image = Image.open("dog.jpg")  # alternative: start from a real image
    # Process image and return variable
    processed_image = preprocess_image(random_image, False)
    # Define optimizer for the image
    optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)
    for i in range(1, 31):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(processed_image)
        # Save image every 5 iterations
        if i % 5 == 0:
            im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
def dream(self):
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image, True)
    plt.rcParams['figure.figsize'] = [24.0, 14.0]
    # Define optimizer for the image
    # Earlier layers need higher learning rates to visualize, whereas later layers need less
    optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)
    # Create a figure for plotting the result
    fig, pl = plt.subplots(1, 1)
    for i in range(1, 10):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                break
        # Loss is the negative mean of the output of the selected layer/filter;
        # minimizing it maximizes that filter's activation
        loss = -torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save the image on the last iteration (the loop runs up to i == 9)
        if i == 9:
            print(self.created_image.shape)
            im_path = 'generated/ddream_l' + str(self.selected_layer) + \
                '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'
            save_image(self.created_image, im_path)
    # Plot the final image and send it back as a Flask response
    pl.imshow(self.created_image)
    plt.savefig('output.jpg')
    return send_file('output.jpg', mimetype='image/jpg')
def generate(self, iterations=150):
    """Generates class specific image

    Keyword Arguments:
        iterations {int} -- Total iterations for gradient ascent (default: {150})

    Returns:
        np.ndarray -- Final maximally activated class image
    """
    initial_learning_rate = 6
    for i in range(1, iterations):
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image, False)
        # Define optimizer for the image
        optimizer = SGD([self.processed_image], lr=initial_learning_rate)
        # Forward
        output = self.model(self.processed_image)
        # Target specific class
        class_loss = -output[0, self.target_class]
        if i % 10 == 0 or i == iterations - 1:
            print('Iteration:', str(i), 'Loss', "{0:.2f}".format(class_loss.data.numpy()))
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        if i % 10 == 0 or i == iterations - 1:
            # Save image
            im_path = '../generated/class_' + str(self.target_class) + '/c_' + \
                str(self.target_class) + '_' + 'iter_' + str(i) + '.png'
            save_image(self.created_image, im_path)
    return self.processed_image
def visualise_layer_with_hooks(self):
    # Hook the selected layer
    self.hook_layer()
    # Process image and return variable
    self.processed_image = preprocess_image(self.created_image)
    # Define optimizer for the image
    # Earlier layers need higher learning rates to visualize whereas later layers need less
    optimizer = SGD([self.processed_image], lr=5, weight_decay=1e-6)
    for i in range(1, 51):
        optimizer.zero_grad()
        # Assign created image to a variable to move forward in the model
        x = self.processed_image
        for index, layer in enumerate(self.model):
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == self.selected_layer:
                # (forward hook function triggered)
                break
        # Loss is the mean of the output of the selected layer/filter, which is
        # minimized here; negate it (-torch.mean) to maximize the activation
        # instead, as in the standard visualization
        loss = torch.mean(self.conv_output)
        print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
        # Backward
        loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image every 5 iterations
        if i % 5 == 0:
            cv2.imwrite(
                'generated/layer_vis_l' + str(self.selected_layer) +
                '_f' + str(self.selected_filter) +
                '_iter' + str(i) + '.jpg', self.created_image)
def generate(self):
    initial_learning_rate = 6
    for i in range(1, 150):
        # Process image and return variable
        self.processed_image = preprocess_image(self.created_image)
        # Define optimizer for the image
        optimizer = SGD([self.processed_image], lr=initial_learning_rate)
        # Forward
        output = self.model(self.processed_image)
        # Target specific class
        class_loss = -output[0, self.target_class]
        print('Iteration:', str(i), 'Loss', "{0:.2f}".format(class_loss.data.numpy()))
        # Zero grads
        self.model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # Recreate image
        self.created_image = recreate_image(self.processed_image)
        # Save image
        cv2.imwrite('../generated/c_specific_iteration_' + str(i) + '.jpg',
                    self.created_image)
    return self.processed_image