def __init__(self, threat_model, perturbation_params):
    """Base state for an adversarial perturbation.

    Stores the threat model and the perturbation parameters, marks the
    perturbation as not-yet-initialized, and resolves whether the GPU
    should be used.
    """
    super(AdversarialPerturbation, self).__init__()
    self.threat_model = threat_model
    self.initialized = False
    self.perturbation_params = perturbation_params

    # perturbation_params may be a tuple; in that case the object that
    # carries the use_gpu flag sits at index 1.
    params_obj = (perturbation_params[1]
                  if isinstance(perturbation_params, tuple)
                  else perturbation_params)
    self.use_gpu = params_obj.use_gpu or utils.use_gpu()
def __init__(self, attack_params, to_eval=None, manual_gpu=None):
    """Set up an evaluation-result collector for an attack.

    to_eval maps result names to either a shorthand string (e.g. 'top1')
    or a callable taking this object as its first argument.
    """
    self.attack_params = attack_params
    self.normalizer = attack_params.adv_attack_obj.normalizer
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()

    # Shorthand strings resolve to bound evaluation methods.
    shorthand_evals = {
        'top1': self.top1_accuracy,
        'avg_successful_lpips': self.avg_successful_lpips,
        'avg_successful_ssim': self.avg_successful_ssim,
        'stash_perturbations': self.stash_perturbations,
        'avg_loss_value': self.avg_loss_value
    }

    if to_eval is None:
        to_eval = {'top1': 'top1'}

    # Work on a copy so the caller's dict is never mutated.
    resolved = {}
    for name, spec in dict(to_eval.items()).items():
        if spec in shorthand_evals:
            resolved[name] = shorthand_evals[spec]
        else:
            assert callable(spec)
            # Custom callables receive this object as their first arg.
            resolved[name] = functools.partial(spec, self)

    self.to_eval = resolved
    self.results = dict.fromkeys(self.to_eval)
    self.params = dict.fromkeys(self.to_eval)
def load_pretrained_mnist_cnn(return_normalizer=False, manual_gpu=None):
    """ Helper fxn to initialize/load the pretrained mnist cnn
    ARGS:
        return_normalizer : bool - if True, also return the
                            DifferentiableNormalize built from the MNIST
                            means/stds
        manual_gpu : None or bool - if not None, manual override of GPU
                     usage; otherwise we defer to utils.use_gpu()
    RETURNS:
        classifier_net, or (classifier_net, normalizer)
    """
    # Resolve load path
    weight_path = os.path.join(MNIST_WEIGHT_PATH, 'mnist.th')

    # Resolve CPU/GPU stuff
    if manual_gpu is not None:
        use_gpu = manual_gpu
    else:
        use_gpu = utils.use_gpu()

    if use_gpu:
        map_location = None
    else:
        map_location = (lambda s, l: s)

    # BUGFIX: map_location was computed but never passed to torch.load,
    # so GPU-saved weights could not be loaded on a CPU-only machine
    # (sibling loader load_pretrained_cifar_resnet already passes it).
    state_dict = torch.load(weight_path, map_location=map_location)
    classifier_net = mnist_cnn.Net()
    classifier_net.load_state_dict(state_dict)
    if return_normalizer:
        normalizer = utils.DifferentiableNormalize(mean=MNIST_MEANS,
                                                   std=MNIST_STDS)
        return classifier_net, normalizer
    return classifier_net
def __init__(self, *args, **kwargs):
    """Dict-backed parameter bag with attribute-style access.

    Any kwargs become both dict entries and attributes.  use_gpu is set
    from the 'manual_gpu' kwarg when provided, else from utils.use_gpu().
    """
    super(PerturbationParameters, self).__init__(*args, **kwargs)
    # BUGFIX: alias the instance __dict__ to the dict itself BEFORE
    # assigning use_gpu.  Previously the aliasing happened last, which
    # discarded the old instance __dict__ holding the freshly-set
    # use_gpu attribute, so later reads of `.use_gpu` (e.g. in
    # AdversarialPerturbation.__init__) would raise AttributeError.
    self.__dict__ = self
    if kwargs.get('manual_gpu') is not None:
        self.use_gpu = kwargs['manual_gpu']
    else:
        self.use_gpu = utils.use_gpu()
def __init__(self, **kwargs):
    """Initialize a parameterized transformation, resolving GPU usage.

    Honors an optional 'manual_gpu' kwarg; otherwise defers to
    utils.use_gpu().
    """
    super(ParameterizedTransformation, self).__init__()
    manual_gpu = kwargs.get('manual_gpu', None)
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
def __init__(self, classifier_net, normalizer, threat_model, manual_gpu=None):
    """Set up state for running a single batch of adversarial attacks.

    ARGS:
        classifier_net : nn.Module subclass - the classifier under attack
        normalizer : DifferentiableNormalize object - converts inputs to
                     mean-zero, unit-var examples (identity if falsy)
        threat_model : ThreatModel object - builds per-minibatch
                       adversarial examples
        manual_gpu : None or boolean - if not None, overrides the
                     'MISTER_ED_GPU' environment setting for GPU usage
    """
    self.classifier_net = classifier_net
    # Fall back to an identity transform when no normalizer is given.
    self.normalizer = normalizer or utils.IdentityNormalize()
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    # No-op validator by default; callers may install a real one later.
    self.validator = lambda *args: None
    self.threat_model = threat_model
def load_pretrained_imagenet(arch='nasnetalarge', return_normalizer=False,
                             manual_gpu=None):
    """Load a pretrained ImageNet classifier by architecture name.

    arch must be one of the architectures exposed by the pretrainedmodels
    package; the model is returned in eval mode, moved to GPU when
    appropriate, optionally together with its input normalizer.
    """
    valid_archs = {
        'fbresnet152', 'bninception', 'resnext101_32x4d', 'resnext101_64x4d',
        'inceptionv4', 'inceptionresnetv2', 'alexnet', 'densenet121',
        'densenet169', 'densenet201', 'densenet161', 'resnet18', 'resnet34',
        'resnet50', 'resnet101', 'resnet152', 'inceptionv3', 'squeezenet1_0',
        'squeezenet1_1', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16',
        'vgg16_bn', 'vgg19_bn', 'vgg19', 'nasnetalarge', 'nasnetamobile',
        'cafferesnet101', 'senet154', 'se_resnet50', 'se_resnet101',
        'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d'
    }
    assert arch in valid_archs

    model = ptm.__dict__[arch](num_classes=1000, pretrained='imagenet')
    model.eval()

    use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    if use_gpu:
        model.cuda()

    if return_normalizer:
        return model, normalizer_from_imagenet_model(model)
    return model
def __init__(self, classifier_net, normalizer, manual_gpu=None):
    """Hold a classifier/normalizer pair and resolve GPU usage."""
    self.classifier_net = classifier_net
    self.normalizer = normalizer
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
def __init__(self, classifier_net, normalizer, manual_gpu=None):
    """Track top-1 accuracy for a classifier/normalizer pair.

    BUGFIX/consistency: the default was `manual_gpu=False`, which made
    the `is not None` fallback below dead code and silently disabled the
    utils.use_gpu() environment fallback that every sibling constructor
    in this file uses.  Default is now None, matching them.
    """
    self.classifier_net = classifier_net
    self.normalizer = normalizer
    if manual_gpu is not None:
        self.use_gpu = manual_gpu
    else:
        self.use_gpu = utils.use_gpu()
    # Running average of top-1 accuracy across evaluated batches.
    self.results = {'top1': utils.AverageMeter()}
def load_mnist_data(train_or_val, extra_args=None, dataset_dir=None,
                    batch_size=None, manual_gpu=None, shuffle=True):
    """ Builds a MNIST data loader for either training or evaluation of
        MNIST data. See the 'DEFAULTS' section in the fxn for default args
    ARGS:
        train_or_val: string - one of 'train' or 'val' for whether we
                      should load training or validation data
        extra_args: dict - if not None is the kwargs to be passed to
                    DataLoader constructor (overrides the defaults built
                    below)
        dataset_dir: string - if not None is a directory to load the data
                     from
        batch_size: int - if not None, overrides DEFAULT_BATCH_SIZE
        manual_gpu : boolean or None- if None, we use the GPU if we can
                     else, we use the GPU iff this is True
        shuffle: boolean - if True, we load the data in a shuffled order
    """
    ##################################################################
    #   DEFAULTS                                                     #
    ##################################################################
    # dataset directory
    dataset_dir = dataset_dir or DEFAULT_DATASETS_DIR
    batch_size = batch_size or DEFAULT_BATCH_SIZE

    # Extra arguments for DataLoader constructor
    if manual_gpu is not None:
        use_gpu = manual_gpu
    else:
        use_gpu = utils.use_gpu()
    constructor_kwargs = {
        'batch_size': batch_size,
        'shuffle': shuffle,
        'num_workers': DEFAULT_WORKERS,
        # pinned memory speeds up host->GPU transfer, so pin iff on GPU
        'pin_memory': use_gpu
    }
    constructor_kwargs.update(extra_args or {})

    transform_chain = transforms.Compose([transforms.ToTensor()])

    # train_or_val validation
    assert train_or_val in ['train', 'val']

    ##################################################################
    #   Build DataLoader                                             #
    ##################################################################
    return torch.utils.data.DataLoader(
        datasets.MNIST(root=dataset_dir,
                       train=train_or_val == 'train',
                       transform=transform_chain,
                       download=True),
        **constructor_kwargs)
def load_imagenet_data(train_or_val, extra_args=None, dataset_dir=None,
                       normalize=False, batch_size=None, manual_gpu=None,
                       means=None, stds=None, shuffle=True,
                       no_transform=False):
    """ Builds an ImageNet (ILSVRC2012) data loader.
    ARGS:
        train_or_val: string - must be 'val'; 'train' is not wired up and
                      will raise a KeyError
        extra_args: dict - if not None is extra kwargs to be passed to the
                    DataLoader constructor (overrides the defaults)
        dataset_dir: string - if not None is a directory to load the data
                     from
        normalize: boolean - if True, append mean/std normalization to the
                   transform chain
        batch_size: int - if not None, overrides DEFAULT_BATCH_SIZE
        manual_gpu : boolean or None - if None, we use the GPU if we can,
                     else we use the GPU iff this is True
        means, stds: lists - per-channel normalization constants; default
                     to DEFAULT_MEANS / DEFAULT_STDS
        shuffle: boolean - if True, we load the data in a shuffled order
        no_transform: boolean - if True, skip all transforms (images come
                      back as PIL images, not tensors)
    """
    ######################################################################
    #   DEFAULTS                                                         #
    ######################################################################
    dataset_dir = dataset_dir or DEFAULT_DATASETS_DIR
    # CONSISTENCY FIX: default the batch size like load_mnist_data does,
    # instead of passing None straight to the DataLoader (batch size 1).
    batch_size = batch_size or DEFAULT_BATCH_SIZE

    assert train_or_val in ['train', 'val']
    image_folder = {
        'val': 'ILSVRC2012_img_val'
    }[train_or_val]  # error on train
    full_image_dir = os.path.join(dataset_dir, image_folder)

    transform_list = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]
    if normalize:
        means = means or DEFAULT_MEANS
        stds = stds or DEFAULT_STDS
        normalizer = transforms.Normalize(mean=means, std=stds)
        transform_list.append(normalizer)

    if no_transform:
        transform = transforms.Compose([])
    else:
        transform = transforms.Compose(transform_list)

    if manual_gpu is not None:
        use_gpu = manual_gpu
    else:
        use_gpu = utils.use_gpu()

    dataset = datasets.ImageFolder(full_image_dir, transform)

    # CONSISTENCY FIX: extra_args was accepted but silently ignored;
    # honor it the same way load_mnist_data does.
    constructor_kwargs = {
        'batch_size': batch_size,
        'shuffle': shuffle,
        'num_workers': DEFAULT_WORKERS,
        'pin_memory': use_gpu
    }
    constructor_kwargs.update(extra_args or {})

    ######################################################################
    #   Build DataLoader                                                 #
    ######################################################################
    return torch.utils.data.DataLoader(dataset, **constructor_kwargs)
def __init__(self, fix_im, **kwargs):
    """LPIPS-based regularization against a fixed reference image."""
    super(LpipsRegularization, self).__init__(fix_im)

    manual_gpu = kwargs.get('manual_gpu', None)
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    # Pass the resolved flag down so the distance model agrees on device.
    self.dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
def __init__(self, fix_im, **kwargs):
    """SSIM-based regularization against a fixed reference image."""
    super(SSIMRegularization, self).__init__(fix_im)

    manual_gpu = kwargs.get('manual_gpu', None)
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()

    # Only forward window_size when the caller actually supplied it
    # (membership test, so an explicit None is still forwarded).
    if 'window_size' in kwargs:
        self.ssim_instance = ssim.SSIM(window_size=kwargs['window_size'])
    else:
        self.ssim_instance = ssim.SSIM()
def __init__(self, classifier_net, normalizer, experiment_name,
             architecture_name, protocol=None, manual_gpu=None):
    """Set up state for (adversarially) training a classifier.

    ARGS:
        classifier_net : nn.Module subclass - instance of neural net to
                         classify images; may or may not be pretrained
        normalizer : DifferentiableNormalize - converts inputs to the
                     zero-mean unit-variance domain
        experiment_name : String - human-readable name of the
                          'trained_model' (helps identify checkpoints)
        architecture_name : String - name shared by models built off the
                            same architecture
        protocol : TrainingProtocol or None - defaults to
                   TrainingProtocol('adv_and_orig', 0.1)
        manual_gpu : None or bool - if not None, manual override of GPU
                     usage; if None we use the GPU if we can

    ON NOMENCLATURE:
        Training checkpoints are saved (per verbosity settings) as
        '<experiment_name>/<architecture_name>/<epoch>.path.tar'.
        Best practice: keep architecture_name consistent across
        adversarially trained models built off the same architecture and
        give each training instance a descriptive experiment_name.
    """
    self.classifier_net = classifier_net
    self.normalizer = normalizer
    self.experiment_name = experiment_name
    self.architecture_name = architecture_name

    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()

    # Verbosity knobs start unset; configured later.
    self.verbosity_level = None
    self.verbosity_minibatch = None
    self.verbosity_adv = None
    self.verbosity_epoch = None

    # Logging knobs likewise start unset.
    self.logger = utils.TrainingLogger()
    self.log_level = None
    self.log_minibatch = None
    self.log_adv = None
    self.log_epoch = None

    if protocol is None:
        protocol = TrainingProtocol('adv_and_orig', 0.1)
    self.protocol = protocol
def __init__(self, pnet_tune=False, use_dropout=False, manual_gpu=None):
    """Perceptual net: AlexNet features + learned linear layers."""
    # HACKETY HACK -- MJ modified this file
    super(PNetLin, self).__init__()

    net_type = alexnet  # ADD FREEDOM HERE LATER
    self.pnet_tune = pnet_tune
    self.chns = [64, 192, 384, 256, 256]

    # When tuning, the backbone is a tracked submodule; otherwise it is
    # tucked inside a plain list (so its parameters are not registered).
    if self.pnet_tune:
        self.net = net_type(requires_grad=self.pnet_tune)
    else:
        self.net = [
            net_type(requires_grad=self.pnet_tune),
        ]

    # One learned linear calibration layer per feature stage.
    self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
    self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
    self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
    self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
    self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
    self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]

    # Shift/scale transform to make inputs mean 0, unit variance.
    self.shift = torch.autograd.Variable(
        torch.Tensor([-.030, -.088, -.188]).view(1, 3, 1, 1))
    self.scale = torch.autograd.Variable(
        torch.Tensor([.458, .448, .450]).view(1, 3, 1, 1))

    # cuda all the things
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    if self.use_gpu:
        backbone = self.net if self.pnet_tune else self.net[0]
        backbone.cuda()
        self.shift = self.shift.cuda()
        self.scale = self.scale.cuda()
        # Module.cuda() moves in place, so looping is equivalent to the
        # five individual calls.
        for lin in self.lins:
            lin.cuda()
def __init__(self, classifier_net, normalizer, manual_gpu=None,
             loss_fxn=None):
    """Track top-1 accuracy and average loss for a classifier."""
    self.classifier_net = classifier_net
    self.normalizer = normalizer
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    # Cross-entropy by default; callers may supply any loss function.
    self.loss_fxn = loss_fxn or nn.CrossEntropyLoss()
    self.results = {
        'top1': utils.AverageMeter(),
        'avg_loss_value': utils.AverageMeter()
    }
def load_pretrained_cifar_resnet(flavor=32, return_normalizer=False,
                                 manual_gpu=None):
    """ Helper fxn to initialize/load the pretrained cifar resnet
    ARGS:
        flavor : int - resnet depth; one of {20, 32, 44, 56, 110, 1202}
        return_normalizer : bool - if True, also return the
                            DifferentiableNormalize built from CIFAR10
                            means/stds
        manual_gpu : None or bool - if not None, manual override of GPU
                     usage; otherwise we defer to utils.use_gpu()
    RETURNS:
        classifier_net, or (classifier_net, normalizer)
    """
    # Resolve load path
    valid_flavor_numbers = [110, 1202, 20, 32, 44, 56]
    assert flavor in valid_flavor_numbers
    weight_path = os.path.join(RESNET_WEIGHT_PATH,
                               'cifar10_resnet%s.th' % flavor)

    # Resolve CPU/GPU stuff
    if manual_gpu is not None:
        use_gpu = manual_gpu
    else:
        use_gpu = utils.use_gpu()

    if use_gpu:
        map_location = None
    else:
        map_location = (lambda s, l: s)

    # need to modify the resnet state dict to be proper
    # TODO: LOAD THESE INTO MODEL ZOO
    bad_state_dict = torch.load(weight_path, map_location=map_location)
    # strip the DataParallel 'module.' prefix from every key
    correct_state_dict = {re.sub(r'^module\.', '', k): v for k, v in
                          bad_state_dict['state_dict'].items()}

    # IDIOM FIX: getattr instead of eval -- same attribute lookup on the
    # cifar_resnets module, without evaluating a constructed string.
    classifier_net = getattr(cifar_resnets, 'resnet%s' % flavor)()
    classifier_net.load_state_dict(correct_state_dict)

    if return_normalizer:
        normalizer = utils.DifferentiableNormalize(mean=CIFAR10_MEANS,
                                                   std=CIFAR10_STDS)
        return classifier_net, normalizer
    return classifier_net
def __init__(self, manual_gpu=None):
    """Resolve GPU usage and pick the matching default tensor type."""
    self.use_gpu = manual_gpu if manual_gpu is not None else utils.use_gpu()
    # cuda FloatTensor on GPU, plain (CPU) Tensor otherwise
    self.Tensor = torch.cuda.FloatTensor if self.use_gpu else torch.Tensor