def setUp(self):
    TorchUtils.set_rnd_seed(1)
    self.general_args = {
        'experiment_type': 'mnist_adversarial',
        'output_root': './src/tests/logs',
        'param_file_path': None
    }
    self.params_overload = {"adv_attack_test": dict()}
    self.test_name = self.id().split('.')[-1]
    logger_utilities.init_logger(logger_name=self.test_name,
                                 output_root=self.general_args['output_root'])
    self.logger = logger_utilities.get_logger()
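# --- Hedged sketch, not part of the original sources --------------------------
# TorchUtils.set_rnd_seed comes from the project's `utilities` module, which is
# not shown in this excerpt. A seeding helper of this kind usually fixes all of
# the relevant RNGs so test runs are reproducible; the actual implementation in
# `utilities.py` may differ:
#
#     import random
#     import numpy as np
#     import torch
#
#     def set_rnd_seed(seed: int) -> None:
#         """Seed Python, NumPy and PyTorch RNGs for reproducible runs."""
#         random.seed(seed)
#         np.random.seed(seed)
#         torch.manual_seed(seed)
#         if torch.cuda.is_available():
#             torch.cuda.manual_seed_all(seed)
# ------------------------------------------------------------------------------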
import json
import jsonargparse
import os
import time

import torch

import logger_utilities
from adversarial.attacks import get_attack
from experimnet_utilities import Experiment
from utilities import TorchUtils

TorchUtils.set_rnd_seed(1)
# Uncomment for performance; comment out for debugging and reproducibility.
# torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False


def eval_adversarial_dataset(model, dataloader, attack, save_adv_sample: bool = False):
    """
    Evaluate model performance on a dataloader under an adversarial attack.
    :param model: the model to evaluate.
    :param dataloader: dataloader yielding the evaluation batches.
    :param attack: adversarial attack applied to each batch before evaluation.
    :param save_adv_sample: if True, keep the generated adversarial samples.
    :return:
    """
    try:
        torch.cuda.reset_max_memory_allocated()
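        # Note: reset_max_memory_allocated() clears PyTorch's peak-GPU-memory counter so
        # the attack's memory footprint can be reported for this run; on CPU-only setups
        # the CUDA call raises, which is presumably why it is wrapped in a try block.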