def main(args):
    set_cuda(args)
    set_seed(args)

    loader_train, loader_val, loader_test = get_data_loaders(args)
    loss = get_loss(args)
    model = get_model(args)
    optimizer = get_optimizer(args, model, loss, parameters=model.parameters())

    xp = setup_xp(args, model, optimizer)

    for i in range(args.epochs):
        xp.epoch.update(i)

        train(model, loss, optimizer, loader_train, args, xp)
        test(model, optimizer, loader_val, args, xp)

        if (i + 1) in args.T:
            decay_optimizer(optimizer, args.decay_factor)

    load_best_model(model, '{}/best_model.pkl'.format(args.xp_name))
    test(model, optimizer, loader_val, args, xp)
    test(model, optimizer, loader_test, args, xp)


if __name__ == '__main__':
    args = parse_command()
    with mlogger.stdout_to("{}/log.txt".format(args.xp_name), enabled=args.log):
        main(args)
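# For reference, a minimal sketch of what the decay_optimizer() helper used above is assumed to
# do: divide the learning rate of every parameter group by the decay factor at the epochs listed
# in args.T. This is an illustrative assumption, not the project's actual implementation.
def _decay_optimizer_sketch(optimizer, decay_factor):
    for param_group in optimizer.param_groups:
        param_group['lr'] /= decay_factor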
def generate_training_dataset(properties_filename, model_name, pgd_learning_rate, num_iterations, output_filename,
                              log_filename=None, epsilon_factor=1.0, subset=None, device='cpu', timeout=float('inf')):
    """
    This function generates the training dataset for supervised learning of the GNN. It does so by performing PGD
    attacks with random initialisations and a large number of steps, following the Branch & Bound algorithm, until an
    adversarial example is found for each property. It then stores the lower and upper bounds associated with each
    such adversarial example in the training dataset. It acts as a wrapper for the
    pgd_attack_property_until_successful() function, which deals with one property at a time.
    """
    # Load all the required data for the images which were correctly verified by the model
    correctly_verified_images, true_labels_cv_images, image_indices_cv_images, model = load_verified_data(model_name)

    # Load the lists of images, true and test labels and epsilons for images which appear both in the properties
    # dataset as verified and in the previously loaded images list
    images, true_labels, test_labels, epsilons = match_with_properties(properties_filename, correctly_verified_images,
                                                                       true_labels_cv_images, image_indices_cv_images)

    # If a subset of indices was specified for the purpose of reducing the time complexity, drop the elements of
    # images, true_labels, test_labels and epsilons not indicated in the subset indices
    if subset is not None:
        images, true_labels, test_labels, epsilons = match_with_subset(subset, images, true_labels, test_labels,
                                                                       epsilons)

    # Initialise the overall list of dictionaries for storing the features of all the considered properties
    overall_list_of_feature_dicts = []

    # Go over one property at a time and make a function call which deals with the single property
    for i in range(len(images)):
        # First, simplify the network by adding the final layer and merging the last two layers into one,
        # incorporating the information about the true and test classes into the network
        simplified_model = simplify_model(model, true_labels[i], test_labels[i])

        # The first attack needs to be unsuccessful because, to utilise each training property in training the GNN,
        # feature vectors have to be constructed for each property, and this cannot be achieved without information
        # about the gradients of the previous unsuccessful PGD attack. Hence, perform the first attack until it is
        # unsuccessful
        feature_dict = pgd_attack_property_until_unsuccessful(simplified_model, images[i],
                                                              epsilons[i] * epsilon_factor, pgd_learning_rate,
                                                              num_iterations, device=device, timeout=timeout)

        # If a timeout was reached and None was returned above, print this and move on to the next property
        if feature_dict is None:
            if log_filename is not None:
                with mlogger.stdout_to('GNN_training/' + log_filename):
                    print("Image " + str(i + 1) + " TIMED OUT before an unsuccessful attack was found")
            else:
                print("Image " + str(i + 1) + " TIMED OUT before an unsuccessful attack was found")
            continue
        # Otherwise, print that an unsuccessful attack was found
        elif log_filename is not None:
            with mlogger.stdout_to('GNN_training/' + log_filename):
                print("Image " + str(i + 1) + " was attacked unsuccessfully")
        else:
            print("Image " + str(i + 1) + " was attacked unsuccessfully")

        # Now make a call to the function which attacks the property until a successful counter-example is found in
        # order to obtain the ground-truth values of a successful attack
        ground_truth_attack = pgd_attack_property_until_successful(simplified_model, images[i],
                                                                   epsilons[i] * epsilon_factor, pgd_learning_rate,
                                                                   num_iterations, device=device, timeout=timeout)

        # If a timeout was reached and None was returned above, print this and move on to the next property
        if ground_truth_attack is None:
            if log_filename is not None:
                with mlogger.stdout_to('GNN_training/' + log_filename):
                    print("Image " + str(i + 1) + " TIMED OUT before a successful attack was found")
            else:
                print("Image " + str(i + 1) + " TIMED OUT before a successful attack was found")
            continue
        # Otherwise, print that a successful attack was found
        elif log_filename is not None:
            with mlogger.stdout_to('GNN_training/' + log_filename):
                print("Image " + str(i + 1) + " was attacked successfully")
        else:
            print("Image " + str(i + 1) + " was attacked successfully")

        # Add the ground-truth attack to the feature dictionary of the current property. Also add its true and test
        # labels to the dictionary
        feature_dict['successful attack'] = ground_truth_attack
        feature_dict['true label'] = true_labels[i]
        feature_dict['test label'] = test_labels[i]

        # Append the generated feature dictionary to the overall list
        overall_list_of_feature_dicts.append(feature_dict)

    # Store all the generated feature dictionaries in a file
    torch.save(overall_list_of_feature_dicts, 'cifar_exp/' + output_filename)
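# Usage sketch: generate a training dataset for the GNN on a GPU, with a one-hour timeout per
# PGD search. The filenames, model name and hyperparameter values below are hypothetical
# placeholders chosen for illustration; they are not prescribed by this module.
if __name__ == '__main__':
    generate_training_dataset('train_SAT_properties.pkl',  # hypothetical properties file
                              'cifar_model',               # hypothetical model name
                              pgd_learning_rate=0.1,
                              num_iterations=100,
                              output_filename='train_SAT_dataset.pkl',
                              log_filename='dataset_generation_log.txt',
                              device='cuda',
                              timeout=3600)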
def cross_validate_gnn(loss_lambda, validation_properties_filename, model_name, pgd_learning_rate, num_iterations,
                       num_attack_epochs, num_trials, num_initialisations, training_dataset_filename=None,
                       gnn_learning_rate=0.0001, num_training_epochs=30, log_filepath=None, epsilon_factor=1.0,
                       device='cpu'):
    """
    This function performs the cross-validation procedure for a single value of lambda.
    """
    # Start the timer
    start_time = time.time()

    # If the training dataset filename was provided, train the GNN first
    if training_dataset_filename:
        # Initialise the filepaths in which the GNN parameters and the training losses for the given value of lambda
        # will be stored
        parameters_filepath = 'experiment_results/gnn_parameters_cross_val_' + str(loss_lambda) + '.pkl'
        training_dict_filepath = 'experiment_results/training_dict_' + str(loss_lambda) + '.pkl'

        # Train the GNN using the current value of lambda and output the learnt parameters into the above filepath
        training_dict = generate_gnn_training_parameters(training_dataset_filename, model_name, gnn_learning_rate,
                                                         num_training_epochs, loss_lambda, parameters_filepath,
                                                         log_filepath=log_filepath, device=device)

        if log_filepath is not None:
            with mlogger.stdout_to(log_filepath):
                print('\nTrained GNN with lambda = ' + str(loss_lambda))
                print('Time elapsed since the start: ' + str(time.time() - start_time))
                print('Epoch losses progression:\n')
                print(training_dict)
                print('\n')
        else:
            print('\nTrained GNN with lambda = ' + str(loss_lambda))
            print('Time elapsed since the start: ' + str(time.time() - start_time))
            print('Epoch losses progression:\n')
            print(training_dict)
            print('\n')

        # Save the training dictionary in the appropriate filepath
        torch.save(training_dict, training_dict_filepath)

    # If no training dataset filename was provided, skip the training stage. In this case, the parameters should
    # already lie in a different directory
    else:
        parameters_filepath = 'experiment_results/cross_validation_gnn_parameters/gnn_parameters_cross_val_' + \
                              str(loss_lambda) + '.pkl'

    # Let the GNN perform PGD attacks on the validation dataset
    validation_attack_success_rate = pgd_gnn_attack_properties(validation_properties_filename, model_name,
                                                               epsilon_factor, pgd_learning_rate, num_iterations,
                                                               num_attack_epochs, num_trials, num_initialisations,
                                                               parameters_filepath, log_filepath=log_filepath,
                                                               device=device)

    if log_filepath is not None:
        with mlogger.stdout_to(log_filepath):
            print('Performed PGD attacks on the validation dataset. Attack success rate = ' +
                  str(validation_attack_success_rate) + '%')
            print('Time elapsed since the start: ' + str(time.time() - start_time))
    else:
        print('Performed PGD attacks on the validation dataset. Attack success rate = ' +
              str(validation_attack_success_rate) + '%')
        print('Time elapsed since the start: ' + str(time.time() - start_time))

    # Initialise the cross-validation dictionary and save it in the experiment results directory
    cross_validation_dict = {'lambda': loss_lambda, 'attack success rate': validation_attack_success_rate}
    torch.save(cross_validation_dict, 'experiment_results/cross_validation_dict_' + str(loss_lambda) + '.pkl')
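# Usage sketch: sweep a grid of lambda values, training and validating a GNN for each. The grid
# and every other value below are hypothetical placeholders chosen for illustration.
if __name__ == '__main__':
    for loss_lambda in [0.001, 0.01, 0.1, 1.0]:
        cross_validate_gnn(loss_lambda, 'val_SAT_properties.pkl', 'cifar_model',
                           pgd_learning_rate=0.1, num_iterations=100, num_attack_epochs=5,
                           num_trials=10, num_initialisations=2,
                           training_dataset_filename='train_SAT_dataset.pkl',
                           log_filepath='cross_validation_log.txt', device='cuda')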
def main(args):
    set_cuda(args)
    set_seed(args)

    loader_train, loader_val, loader_test = get_data_loaders(args)
    loss = get_loss(args)
    model = get_model(args)
    optimizer = get_optimizer(args, parameters=model.parameters())

    xp = setup_xp(args, model, optimizer)

    for i in range(args.epochs):
        xp.epoch.update(i)

        train(model, loss, optimizer, loader_train, args, xp)
        test(model, loader_val, args, xp)

        if (i + 1) in args.T:
            decay_optimizer(optimizer, args.decay_factor)

    load_best_model(model, xp)
    test(model, loader_test, args, xp)


if __name__ == '__main__':
    args = parse_command()
    with mlogger.stdout_to("{}/log.txt".format(args.xp_name)):
        main(args)
def pgd_attack_properties(properties_filename, model_name, epsilon_factor, pgd_learning_rate, num_iterations,
                          num_trials, output_filename, log_filepath=None, subset=None, device='cpu'):
    """
    This function acts as the second baseline against which the pgd_gnn_attack_property() function is compared. It
    initialises a specified number of random trial PGD attacks and performs a specified number of gradient ascent
    iterations for each. For an effective comparison, the product number_of_trials * number_of_iterations_per_trial
    must match the total number of iterations made by pgd_gnn_attack_property(), given by
    number_of_epochs * number_of_iterations_per_epoch.
    """
    # Load all the required data for the images which were correctly verified by the model
    verified_images, verified_true_labels, verified_image_indices, model = load_verified_data(model_name)

    # Load the lists of images, true and test labels and epsilons for images which appear both in the properties
    # dataset as verified and in the previously loaded images list
    images, true_labels, test_labels, epsilons = match_with_properties(properties_filename, verified_images,
                                                                       verified_true_labels, verified_image_indices)

    # If a subset of indices was specified for the purpose of reducing the time complexity, drop the elements of
    # images, true_labels, test_labels and epsilons not indicated in the subset indices
    if subset is not None:
        images, true_labels, test_labels, epsilons = match_with_subset(subset, images, true_labels, test_labels,
                                                                       epsilons)

    # Now attack each property in turn for the specified number of trials. Initialise the counter of properties which
    # were successfully PGD attacked as well as the start time of the experiment. Also initialise the output dictionary
    num_successful_attacks = 0
    start_time = time.time()
    output_dict = {'times': [], 'attack success rates': []}

    for i in range(len(images)):
        # First, simplify the network by adding the final layer and merging the last two layers into one,
        # incorporating the information about the true and test classes into the network
        simplified_model = simplify_model(model, true_labels[i], test_labels[i])

        successful_attack_flag = False
        for trial in range(num_trials):
            # First, perturb the image randomly within the allowed bounds
            lower_bound = torch.add(-epsilons[i] * epsilon_factor, images[i])
            upper_bound = torch.add(epsilons[i] * epsilon_factor, images[i])
            perturbed_image = perturb_image(lower_bound, upper_bound)

            # Now perform a single PGD attack
            successful_attack_flag, _, _ = gradient_ascent(simplified_model, perturbed_image, lower_bound,
                                                           upper_bound, pgd_learning_rate, num_iterations,
                                                           device=device)

            # If the attack was successful, increase the counter and break from the trials loop
            if successful_attack_flag:
                num_successful_attacks += 1
                break

        if log_filepath is not None:
            if successful_attack_flag:
                with mlogger.stdout_to(log_filepath):
                    print('Image ' + str(i + 1) + ' was attacked successfully')
            else:
                with mlogger.stdout_to(log_filepath):
                    print('Image ' + str(i + 1) + ' was NOT attacked successfully')

        # Calculate the current attack success rate for the properties in the file provided after all the PGD attacks
        # on the current image
        attack_success_rate = 100.0 * num_successful_attacks / len(images)

        # Store the elapsed time and corresponding current attack success rate in the output dictionary
        output_dict['times'].append(time.time() - start_time)
        output_dict['attack success rates'].append(attack_success_rate)

    # Finally, store the output dictionary in the prescribed location in the current folder
    torch.save(output_dict, 'experiment_results/' + output_filename)
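# Usage sketch: run the random-restart PGD baseline. To keep the comparison fair, num_trials *
# num_iterations here should equal the GNN-guided attack's total iteration budget (e.g. 20 * 100
# against 5 attack epochs of 400 iterations each). All names and values below are illustrative
# assumptions, not values prescribed by this module.
if __name__ == '__main__':
    pgd_attack_properties('val_SAT_properties.pkl', 'cifar_model', epsilon_factor=1.0,
                          pgd_learning_rate=0.1, num_iterations=100, num_trials=20,
                          output_filename='baseline_pgd_results.pkl',
                          log_filepath='baseline_log.txt', device='cuda')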
def generate_gnn_training_parameters(training_dataset_filename, model_name, gnn_learning_rate, num_epochs,
                                     loss_lambda, parameters_output_filepath, log_filepath=None, device='cpu'):
    """
    This function trains a Graph Neural Network using supervised learning. After the parameters of the Graph Neural
    Network are learned, they are stored in the desired file.
    """
    # First, load the training dataset, which is a list of feature dictionaries, from the specified filename. Also
    # load the model. If the combined training dataset is specified, construct it from all the relevant parts
    if training_dataset_filename == 'train_SAT_jade_combined_dataset.pkl':
        # First, extract all the parts of the training dataset into a list
        training_filenames_list = glob.glob('cifar_exp/train_SAT_jade_combined_dataset_*')

        # Construct the list of dictionaries from the parts of the overall training dataset
        list_of_feature_dicts = []
        for filename in training_filenames_list:
            list_of_feature_dicts += torch.load(filename)
    # If the combined training and validation dataset is specified, construct it from all the relevant parts
    elif training_dataset_filename == 'train_val_SAT_jade_combined_dataset.pkl':
        # First, extract all the parts of the training dataset into a list
        training_filenames_list = glob.glob('cifar_exp/train_SAT_jade_combined_dataset_*')

        # Construct the list of dictionaries from the parts of the overall training dataset
        list_of_feature_dicts = []
        for filename in training_filenames_list:
            list_of_feature_dicts += torch.load(filename)

        # Finally, add the validation dataset information to the overall list
        list_of_feature_dicts += torch.load('cifar_exp/val_SAT_jade_dataset.pkl')
    # Otherwise, load the training dataset all at once
    else:
        list_of_feature_dicts = torch.load('cifar_exp/' + training_dataset_filename)

    model = load_trained_model(model_name)

    # Create the temporary variables which will only be used to initialise the GNN structure. Then create an instance
    # of the GraphNeuralNetwork object using these variables
    temp_simplified_model = simplify_model(model, 0, 1)
    temp_input_size = list_of_feature_dicts[0]['successful attack'].size()
    temp_input_feature_size = list_of_feature_dicts[0]['input'].size()[0]
    temp_relu_feature_size = list_of_feature_dicts[0]['hidden'][0].size()[0]
    temp_output_feature_size = list_of_feature_dicts[0]['output'].size()[0]
    gnn = GraphNeuralNetwork(temp_simplified_model, temp_input_size, temp_input_feature_size, temp_relu_feature_size,
                             temp_output_feature_size, training_mode=True, device=device)

    # TODO
    gnn.load_parameters('experiment_results/cross_validation_gnn_parameters/gnn_parameters_cross_val_0.069.pkl')

    # If training on the GPU, move all the feature tensors there
    if device == 'cuda' and torch.cuda.is_available():
        for dict_idx in range(len(list_of_feature_dicts)):
            list_of_feature_dicts[dict_idx]['input'] = list_of_feature_dicts[dict_idx]['input'].cuda()
            list_of_feature_dicts[dict_idx]['hidden'] = [tensor.cuda() for tensor in
                                                         list_of_feature_dicts[dict_idx]['hidden']]
            list_of_feature_dicts[dict_idx]['output'] = list_of_feature_dicts[dict_idx]['output'].cuda()
            list_of_feature_dicts[dict_idx]['successful attack'] = \
                list_of_feature_dicts[dict_idx]['successful attack'].cuda()

    # Initialise the optimizer on the parameters of the GNN
    optimizer = torch.optim.Adam(gnn.parameters(), lr=gnn_learning_rate)

    # Initialise the dictionary to store the progression of both epoch loss terms as well as the lambda parameter
    training_dict = {'loss term 1': [], 'loss term 2': [], 'lambda': loss_lambda}

    # Follow the training algorithm for a specified number of epochs
    for epoch in range(num_epochs):
        # Initialise the variables which will accumulate the losses related to both loss terms over each epoch
        epoch_loss_term_1 = 0
        epoch_loss_term_2 = 0

        # For each property appearing in the training dataset
        for property_index in range(len(list_of_feature_dicts)):
            feature_dict = list_of_feature_dicts[property_index]

            # Update the last layer of the GNN according to the currently considered true and test labels
            gnn.reconnect_last_layer(model_name, feature_dict['true label'], feature_dict['test label'])

            # Perform a series of forward and backward updates of all the embedding vectors within the GNN
            gnn.update_embedding_vectors(feature_dict['input'], feature_dict['hidden'], feature_dict['output'])

            # Update the domain bounds for each pixel based on the GNN outputs
            old_lower_bound = feature_dict['input'][0, :].reshape(temp_input_size)
            old_upper_bound = feature_dict['input'][1, :].reshape(temp_input_size)
            if device == 'cuda' and torch.cuda.is_available():
                old_lower_bound = old_lower_bound.cuda()
                old_upper_bound = old_upper_bound.cuda()
            new_lower_bound, new_upper_bound = gnn.compute_updated_bounds(old_lower_bound, old_upper_bound)
            if device == 'cuda' and torch.cuda.is_available():
                new_lower_bound = new_lower_bound.cuda()
                new_upper_bound = new_upper_bound.cuda()

            # Compute the loss by making a call to the special function and add it to the accumulator variables
            loss, loss_term_1, loss_term_2 = compute_loss(old_lower_bound, old_upper_bound, new_lower_bound,
                                                          new_upper_bound, feature_dict['successful attack'],
                                                          loss_lambda, device=device)
            epoch_loss_term_1 += loss_term_1.item()
            epoch_loss_term_2 += loss_term_2.item()
            if device == 'cuda' and torch.cuda.is_available():
                loss = loss.cuda()

            # Make the optimizer step in the usual manner
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Append the mean epoch losses during the current epoch to the lists
        mean_epoch_loss_term_1 = epoch_loss_term_1 / len(list_of_feature_dicts)
        mean_epoch_loss_term_2 = epoch_loss_term_2 / len(list_of_feature_dicts)
        training_dict['loss term 1'].append(mean_epoch_loss_term_1)
        training_dict['loss term 2'].append(mean_epoch_loss_term_2)

        # Print a message to the terminal at the end of each epoch
        if log_filepath is not None:
            with mlogger.stdout_to(log_filepath):
                print("Epoch " + str(epoch + 1) + " complete. Loss term 1: " + str(mean_epoch_loss_term_1) +
                      ". Loss term 2 times lambda: " + str(mean_epoch_loss_term_2 * loss_lambda))
        else:
            print("Epoch " + str(epoch + 1) + " complete. Loss term 1: " + str(mean_epoch_loss_term_1) +
                  ". Loss term 2 times lambda: " + str(mean_epoch_loss_term_2 * loss_lambda))

    # Finally, construct a list of all the state dictionaries of the auxiliary neural networks of the GNN and save it
    gnn_state_dicts_list = []
    gnn_neural_networks = gnn.get_auxiliary_networks_list()
    for gnn_neural_network in gnn_neural_networks:
        gnn_state_dicts_list.append(gnn_neural_network.state_dict())
    torch.save(gnn_state_dicts_list, parameters_output_filepath)

    return training_dict
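# Usage sketch: train the GNN on a previously generated dataset and save the learnt parameters.
# Filenames and hyperparameter values are hypothetical placeholders chosen for illustration.
if __name__ == '__main__':
    training_dict = generate_gnn_training_parameters('train_SAT_dataset.pkl', 'cifar_model',
                                                     gnn_learning_rate=0.0001, num_epochs=30,
                                                     loss_lambda=0.01,
                                                     parameters_output_filepath='experiment_results/gnn_parameters.pkl',
                                                     device='cuda')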
def pgd_gnn_attack_property(simplified_model, image, epsilon, epsilon_factor, pgd_learning_rate, num_iterations,
                            num_attack_epochs, num_trials, num_initialisations, gnn_parameters_filepath,
                            log_filepath=None, device='cpu'):
    """
    This function performs the PGD attack on the specified property, characterised by its image, corresponding
    simplified model and epsilon value, by utilising the GNN framework. Since the first PGD attack is important, the
    number of random restarts of the whole procedure is specified by one of the arguments. During each restart, the
    GNN performs a specified number of bound updates, and after each such update a specified number of trial PGD
    attacks are performed on the new domain.
    """
    # For a specified number of restarts
    for initialisation in range(num_initialisations):
        # First, perturb the image randomly within the allowed bounds and perform a PGD attack
        lower_bound = torch.add(-epsilon * epsilon_factor, image)
        upper_bound = torch.add(epsilon * epsilon_factor, image)
        perturbed_image = perturb_image(lower_bound, upper_bound)
        successful_attack_flag, perturbed_image, gradient_info_dict = gradient_ascent(simplified_model,
                                                                                      perturbed_image, lower_bound,
                                                                                      upper_bound, pgd_learning_rate,
                                                                                      num_iterations, device=device)

        # If the attack was successful, the procedure can be terminated and True can be returned
        if successful_attack_flag:
            if log_filepath is not None:
                with mlogger.stdout_to(log_filepath):
                    print("Initial PGD attack succeeded")
            else:
                print("Initial PGD attack succeeded")
            return True

        # Otherwise, the GNN framework approach must be followed. First, generate the feature vectors for all layers
        input_feature_vectors = generate_input_feature_vectors(lower_bound, upper_bound, perturbed_image,
                                                               gradient_info_dict)
        relu_feature_vectors_list, output_feature_vectors = generate_relu_output_feature_vectors(simplified_model,
                                                                                                 lower_bound,
                                                                                                 upper_bound,
                                                                                                 perturbed_image)

        # Initialise the GNN for the given network (which also initialises all the required auxiliary neural networks)
        gnn = GraphNeuralNetwork(simplified_model, image.size(), input_feature_vectors.size()[0],
                                 relu_feature_vectors_list[0].size()[0], output_feature_vectors.size()[0])

        # Load the learnt GNN parameters into the GNN
        gnn.load_parameters(gnn_parameters_filepath)

        # Follow the GNN framework approach for a specified number of epochs
        for attack_epoch in range(num_attack_epochs):
            # Perform a series of forward and backward updates of all the embedding vectors within the GNN
            gnn.update_embedding_vectors(input_feature_vectors, relu_feature_vectors_list, output_feature_vectors)

            # Update the domain bounds for each pixel based on the pixel scores above
            lower_bound, upper_bound = gnn.compute_updated_bounds(lower_bound, upper_bound)

            # For a specified number of random restarts, perform randomly initialised PGD attacks on the new subdomain
            for trial in range(num_trials):
                # TODO
                if num_attack_epochs == 2 and attack_epoch == 1 and trial == 14:
                    break
                if num_attack_epochs == 3 and attack_epoch == 2 and trial == 9:
                    break
                if num_attack_epochs == 4 and attack_epoch != 0 and trial == 7:
                    break
                if num_attack_epochs == 5 and attack_epoch == 4 and trial == 5:
                    break
                if num_attack_epochs == 6 and attack_epoch == 5 and trial == 4:
                    break
                if num_attack_epochs == 7 and attack_epoch != 0 and trial == 4:
                    break

                # Perturb each pixel within the updated domain bounds
                perturbed_image = perturb_image(lower_bound, upper_bound)

                # Perform a PGD attack given the new bounds and perturbation
                successful_attack_flag, perturbed_image, gradient_info_dict = gradient_ascent(simplified_model,
                                                                                              perturbed_image,
                                                                                              lower_bound, upper_bound,
                                                                                              pgd_learning_rate,
                                                                                              num_iterations,
                                                                                              device=device)

                # If the attack was successful, the procedure can be terminated and True can be returned; otherwise,
                # continue
                if successful_attack_flag:
                    if log_filepath is not None:
                        with mlogger.stdout_to(log_filepath):
                            print("PGD attack succeeded during: (Trial " + str(trial + 1) + "; Attack Epoch " +
                                  str(attack_epoch + 1) + "; Initialisation " + str(initialisation + 1) + ")")
                    else:
                        print("PGD attack succeeded during: (Trial " + str(trial + 1) + "; Attack Epoch " +
                              str(attack_epoch + 1) + "; Initialisation " + str(initialisation + 1) + ")")
                    return True

            # Otherwise, if this is not the last attack epoch, update all the feature vectors using the new
            # information
            if attack_epoch != num_attack_epochs - 1:
                input_feature_vectors = generate_input_feature_vectors(lower_bound, upper_bound, perturbed_image,
                                                                       gradient_info_dict)
                relu_feature_vectors_list, output_feature_vectors = generate_relu_output_feature_vectors(
                    simplified_model, lower_bound, upper_bound, perturbed_image)

    # If the limit on the number of restarts was reached and no PGD attack was successful, return False
    return False
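# For reference, a minimal sketch of the uniform random initialisation that the perturb_image()
# helper used above is assumed to perform: sample each pixel independently within
# [lower_bound, upper_bound]. This is an illustrative assumption, not the project's actual helper.
import torch

def _perturb_image_sketch(lower_bound, upper_bound):
    return lower_bound + (upper_bound - lower_bound) * torch.rand_like(lower_bound)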
def pgd_gnn_attack_properties(properties_filename, model_name, epsilon_factor, pgd_learning_rate, num_iterations,
                              num_attack_epochs, num_trials, num_initialisations, gnn_parameters_filepath,
                              output_filename=None, log_filepath=None, subset=None, device='cpu'):
    """
    This function aims to find adversarial examples for each property in the specified file. It acts as a wrapper for
    pgd_gnn_attack_property(), calling that function for each property in turn.
    """
    # Load all the required data for the images which were correctly verified by the model
    verified_images, verified_true_labels, verified_image_indices, model = load_verified_data(model_name)

    # Load the lists of images, true and test labels and epsilons for images which appear both in the properties
    # dataset as verified and in the previously loaded images list
    images, true_labels, test_labels, epsilons = match_with_properties(properties_filename, verified_images,
                                                                       verified_true_labels, verified_image_indices)

    # If a subset of indices was specified for the purpose of reducing the time complexity, drop the elements of
    # images, true_labels, test_labels and epsilons not indicated in the subset indices
    if subset is not None:
        images, true_labels, test_labels, epsilons = match_with_subset(subset, images, true_labels, test_labels,
                                                                       epsilons)

    # Now attack each property in turn by calling the appropriate function. Initialise the counter of properties which
    # were successfully PGD attacked, the starting time of the experiment and the attack success rate. Also initialise
    # the output dictionary containing the empty lists of times and corresponding attack success rates
    num_successful_attacks = 0
    attack_success_rate = 0
    start_time = time.time()
    output_dict = {'times': [], 'attack success rates': []}

    for i in range(len(images)):
        # First, simplify the network by adding the final layer and merging the last two layers into one,
        # incorporating the information about the true and test classes into the network
        simplified_model = simplify_model(model, true_labels[i], test_labels[i])

        # Use the special function which attacks one particular property using the GNN
        successful_attack_flag = pgd_gnn_attack_property(simplified_model, images[i], epsilons[i], epsilon_factor,
                                                         pgd_learning_rate, num_iterations, num_attack_epochs,
                                                         num_trials, num_initialisations, gnn_parameters_filepath,
                                                         log_filepath=log_filepath, device=device)

        if log_filepath is not None:
            if successful_attack_flag:
                with mlogger.stdout_to(log_filepath):
                    print('Image ' + str(i + 1) + ' was attacked successfully')
            else:
                with mlogger.stdout_to(log_filepath):
                    print('Image ' + str(i + 1) + ' was NOT attacked successfully')
        else:
            if successful_attack_flag:
                print('Image ' + str(i + 1) + ' was attacked successfully')
            else:
                print('Image ' + str(i + 1) + ' was NOT attacked successfully')

        # If the attack was successful, increase the counter
        if successful_attack_flag:
            num_successful_attacks += 1

        # Calculate the current attack success rate for the properties in the file provided
        attack_success_rate = 100.0 * num_successful_attacks / len(images)

        # If the output filename was provided, store the elapsed time and corresponding current attack success rate
        # in the output dictionary
        if output_filename is not None:
            output_dict['times'].append(time.time() - start_time)
            output_dict['attack success rates'].append(attack_success_rate)

    # Finally, if the output filename was provided, store the output dictionary in the prescribed location in the
    # current folder. Otherwise, simply return the final attack success rate
    if output_filename is not None:
        torch.save(output_dict, 'experiment_results/' + output_filename)
    else:
        return attack_success_rate
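# Usage sketch: run the GNN-guided PGD attack over a properties file and record the success-rate
# curve over time. Every filename and value below is a hypothetical placeholder for illustration.
if __name__ == '__main__':
    pgd_gnn_attack_properties('test_SAT_properties.pkl', 'cifar_model', epsilon_factor=1.0,
                              pgd_learning_rate=0.1, num_iterations=100, num_attack_epochs=5,
                              num_trials=10, num_initialisations=2,
                              gnn_parameters_filepath='experiment_results/gnn_parameters.pkl',
                              output_filename='gnn_pgd_results.pkl',
                              log_filepath='gnn_attack_log.txt', device='cuda')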