def defense(self, train_loader=None, validation_loader=None):
    """
    :param train_loader: loader of the training dataset
    :param validation_loader: loader of the validation dataset
    :return: None (the best defense-enhanced model is saved to disk)
    """
    best_val_acc = None
    for epoch in range(self.num_epochs):
        # training the model with natural examples and corresponding adversarial examples
        self.train_one_epoch_with_adv_and_nat(train_loader=train_loader, epoch=epoch)
        val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)

        # adjust the learning rate for CIFAR10
        if self.Dataset == 'CIFAR10':
            adjust_learning_rate(epoch=epoch, optimizer=self.optimizer)

        # save the retrained defense-enhanced model
        assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
        defense_enhanced_saver = '../DefenseEnhancedModels/{}/{}_{}_enhanced.pt'.format(self.defense_name, self.Dataset, self.defense_name)
        if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(defense_enhanced_saver)
            best_val_acc = val_acc
            self.model.save(name=defense_enhanced_saver)
        else:
            print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
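
# For context, a minimal sketch (assumed, not the repository's code) of the kind
# of routine train_one_epoch_with_adv_and_nat refers to: each batch is trained
# on both natural examples and adversarial counterparts. The one-step FGSM
# attack, the 0.5/0.5 loss weighting, and the epsilon parameter are assumptions.
import torch
import torch.nn.functional as F

def train_one_epoch_with_adv_and_nat_sketch(model, train_loader, optimizer, device, epsilon=0.3):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # one-step FGSM perturbation of the natural batch
        images.requires_grad_(True)
        grad = torch.autograd.grad(F.cross_entropy(model(images), labels), images)[0]
        adv_images = torch.clamp(images.detach() + epsilon * grad.sign(), 0.0, 1.0)

        # joint loss on the natural and adversarial batches
        optimizer.zero_grad()
        loss = 0.5 * F.cross_entropy(model(images.detach()), labels) + \
               0.5 * F.cross_entropy(model(adv_images), labels)
        loss.backward()
        optimizer.step()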
def defense(self, train_loader=None, valid_loader=None):
    # transform the raw training/validation data into the defense's input domain
    transformed_train_data_numpy, transformed_train_label_numpy = self.transforming_dataset(train_loader)
    transformed_val_data_numpy, transformed_val_label_numpy = self.transforming_dataset(valid_loader)

    # rebuild PyTorch loaders around the transformed numpy arrays
    transformed_train_dataset = TransformedDataset(images=torch.from_numpy(transformed_train_data_numpy),
                                                   labels=torch.from_numpy(transformed_train_label_numpy),
                                                   dataset=self.Dataset, transform=self.transform)
    transformed_train_loader = torch.utils.data.DataLoader(transformed_train_dataset, batch_size=self.batch_size, shuffle=True)

    transformed_val_dataset = TransformedDataset(images=torch.from_numpy(transformed_val_data_numpy),
                                                 labels=torch.from_numpy(transformed_val_label_numpy),
                                                 dataset=self.Dataset, transform=None)
    transformed_val_loader = torch.utils.data.DataLoader(transformed_val_dataset, batch_size=self.batch_size, shuffle=False)

    best_val_acc = None
    for epoch in range(self.num_epochs):
        # retrain the model on the transformed dataset
        train_one_epoch(model=self.model, train_loader=transformed_train_loader, optimizer=self.optimizer,
                        epoch=epoch, device=self.device)
        val_acc = validation_evaluation(model=self.model, validation_loader=transformed_val_loader, device=self.device)

        # adjust the learning rate for CIFAR10
        if self.Dataset == 'CIFAR10':
            adjust_learning_rate(epoch=epoch, optimizer=self.optimizer)

        # save the retrained defense-enhanced model
        assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
        defense_enhanced_saver = '../DefenseEnhancedModels/{}/{}_{}_enhanced.pt'.format(self.defense_name, self.Dataset, self.defense_name)
        if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(defense_enhanced_saver)
            best_val_acc = val_acc
            self.model.save(name=defense_enhanced_saver)
        else:
            print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
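
# A minimal sketch, assuming TransformedDataset is a thin torch Dataset that
# serves pre-transformed tensors and optionally applies a per-sample transform;
# the constructor signature mirrors the calls above, but the body is an
# assumption, not the repository's implementation.
import torch.utils.data

class TransformedDatasetSketch(torch.utils.data.Dataset):
    def __init__(self, images, labels, dataset='MNIST', transform=None):
        self.images = images          # tensor of transformed inputs
        self.labels = labels          # tensor of integer labels
        self.dataset = dataset        # dataset name, kept for bookkeeping
        self.transform = transform    # optional per-sample augmentation

    def __len__(self):
        return self.labels.size(0)

    def __getitem__(self, index):
        image = self.images[index]
        if self.transform is not None:
            image = self.transform(image)
        return image, self.labels[index]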
def defense(self, train_loader=None, validation_loader=None):
    best_val_acc = None
    for epoch in range(self.num_epochs):
        # training the model using input gradient regularization
        self.train_one_epoch_with_lambda_regularization(train_loader=train_loader, epoch=epoch)
        val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)

        # save the retrained defense-enhanced model
        assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
        defense_enhanced_saver = '../DefenseEnhancedModels/{}/{}_{}_enhanced.pt'.format(self.defense_name, self.Dataset, self.defense_name)
        if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(defense_enhanced_saver)
            best_val_acc = val_acc
            self.model.save(name=defense_enhanced_saver)
        else:
            print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
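
# For context, a minimal sketch of the loss that input gradient regularization
# typically minimizes: the task loss plus lambda times the squared L2 norm of
# the loss gradient w.r.t. the input. The lambda_r name and this double-backward
# formulation are assumptions, not the repository's exact implementation.
import torch
import torch.nn.functional as F

def gradient_regularized_loss(model, images, labels, lambda_r=0.1):
    images = images.clone().requires_grad_(True)
    task_loss = F.cross_entropy(model(images), labels)

    # create_graph=True so the gradient-penalty term is itself differentiable
    input_grad = torch.autograd.grad(task_loss, images, create_graph=True)[0]
    penalty = input_grad.pow(2).sum(dim=tuple(range(1, input_grad.dim()))).mean()

    return task_loss + lambda_r * penalty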
def search_best_radius(self, validation_loader=None, radius_min=0.0, radius_max=1.0, radius_step=0.01):
    """
    :param validation_loader: loader of the validation dataset
    :param radius_min: lower bound of the radius search interval
    :param radius_max: upper bound of the radius search interval
    :param radius_step: step size of the search
    :return: the largest radius whose region-based accuracy stays within 1e-2 of the original accuracy
    """
    self.model.eval()
    with torch.no_grad():
        # compute the original classification accuracy on the validation dataset
        val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)
        print('<--- original classification accuracy on validation dataset is {:.4f} --->'.format(val_acc))

        # learn the radius through a search process
        total_step = int((radius_max - radius_min) / radius_step)
        for index in range(total_step):
            # update the radius
            tmp_radius = radius_min + radius_step * (index + 1)

            # calculate the accuracy of region-based classification on the validation dataset
            total = 0.0
            correct = 0.0
            for images, labels in validation_loader:
                rc_preds = self.region_based_classification(samples=images, radius=tmp_radius)
                rc_labels = torch.from_numpy(rc_preds)
                correct += (rc_labels == labels).sum().item()
                total += labels.size(0)
            rc_acc = correct / total
            print('\tcurrent radius is {:.2f}, validation accuracy is {:.1f}/{:.1f}={:.5f}'.format(tmp_radius, correct, total, rc_acc))

            # stop as soon as region-based accuracy drops more than 1e-2 below the original accuracy
            if (val_acc - rc_acc) > 1e-2:
                return round(tmp_radius - radius_step, 2)
    return radius_max
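
# A minimal sketch (assumed, not the repository's code) of the kind of routine
# region_based_classification refers to: for each sample, draw points uniformly
# from the hypercube of the given radius around it, classify each point, and
# return the majority-vote label. The num_points parameter is an assumption.
import numpy as np
import torch

def region_based_classification_sketch(model, samples, radius, device, num_points=100):
    model.eval()
    votes = []
    with torch.no_grad():
        for sample in samples:
            # perturbed copies within the L-infinity ball of the given radius
            noise = torch.empty((num_points,) + sample.shape).uniform_(-radius, radius)
            region = torch.clamp(sample.unsqueeze(0) + noise, 0.0, 1.0).to(device)
            preds = model(region).argmax(dim=1).cpu().numpy()
            # majority vote over the region's predictions
            votes.append(np.bincount(preds).argmax())
    return np.array(votes)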
def main(args):
    # Device configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Set the random seed manually for reproducibility
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # get the training, validation and testing dataset loaders
    train_loader, valid_loader = get_mnist_train_validate_loader(dir_name='./MNIST/',
                                                                 batch_size=MNIST_Training_Parameters['batch_size'],
                                                                 valid_size=0.1, shuffle=True)
    test_loader = get_mnist_test_loader(dir_name='./MNIST/', batch_size=MNIST_Training_Parameters['batch_size'])

    # set up the model and optimizer
    mnist_model = MNISTConvNet().to(device)
    optimizer = optim.SGD(mnist_model.parameters(), lr=MNIST_Training_Parameters['learning_rate'],
                          momentum=MNIST_Training_Parameters['momentum'],
                          weight_decay=MNIST_Training_Parameters['decay'], nesterov=True)

    # Training
    best_val_acc = None
    model_saver = './MNIST/model/MNIST_raw.pt'
    for epoch in range(MNIST_Training_Parameters['num_epochs']):
        # training the model within one epoch
        train_one_epoch(model=mnist_model, train_loader=train_loader, optimizer=optimizer, epoch=epoch, device=device)

        # validation
        val_acc = validation_evaluation(model=mnist_model, validation_loader=valid_loader, device=device)
        if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            mnist_model.save(name=model_saver)
        else:
            print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))

    # Testing
    final_model = copy.deepcopy(mnist_model)
    final_model.load(path=model_saver, device=device)
    accuracy = testing_evaluation(model=final_model, test_loader=test_loader, device=device)
    print('Finally, the ACCURACY of saved model [{}] on testing dataset is {:.2f}%\n'.format(final_model.model_name, accuracy * 100.0))
def train_external_model_group(self, train_loader=None, validation_loader=None):
    """
    :param train_loader: loader of the training dataset
    :param validation_loader: loader of the validation dataset
    :return: None (each trained external model is saved to disk)
    """
    # Set up the model group with 4 static external models
    if self.Dataset == 'MNIST':
        model_group = [MNIST_A(), MNIST_B(), MNIST_C(), MNIST_D()]
    else:
        model_group = [CIFAR10_A(), CIFAR10_B(), CIFAR10_C(), CIFAR10_D()]
    model_group = [model.to(self.device) for model in model_group]

    # training the models in model_group one by one
    for i in range(len(model_group)):
        # prepare the optimizer for MNIST
        if self.Dataset == "MNIST":
            optimizer_external = optim.SGD(model_group[i].parameters(), lr=self.training_parameters['learning_rate'],
                                           momentum=self.training_parameters['momentum'],
                                           weight_decay=self.training_parameters['decay'], nesterov=True)
        # prepare the optimizer for CIFAR10
        else:
            if i == 3:
                optimizer_external = optim.SGD(model_group[i].parameters(), lr=0.001, momentum=0.9, weight_decay=1e-6)
            else:
                optimizer_external = optim.Adam(model_group[i].parameters(), lr=self.training_parameters['lr'])

        print('\ntraining the {}-th static external model ......'.format(i))
        best_val_acc = None
        for index_epoch in range(self.num_epochs):
            train_one_epoch(model=model_group[i], train_loader=train_loader, optimizer=optimizer_external,
                            epoch=index_epoch, device=self.device)
            val_acc = validation_evaluation(model=model_group[i], validation_loader=validation_loader, device=self.device)

            if self.Dataset == 'CIFAR10':
                adjust_learning_rate(epoch=index_epoch, optimizer=optimizer_external)

            # save the best external model on the validation dataset
            assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
            defense_external_saver = '../DefenseEnhancedModels/{}/{}_EAT_{}.pt'.format(self.defense_name, self.Dataset, str(i))
            if best_val_acc is None or round(val_acc, 4) >= round(best_val_acc, 4):
                if best_val_acc is not None:
                    os.remove(defense_external_saver)
                best_val_acc = val_acc
                model_group[i].save(name=defense_external_saver)
            else:
                print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(index_epoch, best_val_acc))
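
# For context, a minimal sketch (assumed, not the repository's code) of how
# ensemble adversarial training typically consumes these static external
# models: each batch's adversarial examples are crafted against a randomly
# chosen pre-trained model from the group, then the defended model is trained
# on natural and adversarial examples together. This reuses the one-step FGSM
# perturbation idea sketched earlier; the attack and epsilon are assumptions.
import random
import torch
import torch.nn.functional as F

def eat_train_step(defended_model, model_group, optimizer, images, labels, device, epsilon=0.3):
    images, labels = images.to(device), labels.to(device)

    # craft adversarial examples against a randomly selected static model
    source = random.choice(model_group)
    images.requires_grad_(True)
    grad = torch.autograd.grad(F.cross_entropy(source(images), labels), images)[0]
    adv_images = torch.clamp(images.detach() + epsilon * grad.sign(), 0.0, 1.0)

    # train the defended model on the natural and adversarial batches
    optimizer.zero_grad()
    loss = 0.5 * F.cross_entropy(defended_model(images.detach()), labels) + \
           0.5 * F.cross_entropy(defended_model(adv_images), labels)
    loss.backward()
    optimizer.step()
    return loss.item()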