# Entry point: cascade robust training on CIFAR-10.
# NOTE(review): this chunk is truncated — the body of the `if _ > 0:` branch
# (and the rest of the cascade loop) lies beyond the visible source.
if __name__ == "__main__":
    # Parse hyperparameters; epsilon ramps from starting_epsilon up to epsilon
    # (scale of 0.0347 — presumably an L-inf pixel budget; confirm against pblm.argparser).
    args = pblm.argparser(epsilon=0.0347, starting_epsilon=0.001, batch_size=50, opt='sgd', lr=0.05)
    print("saving file to {}".format(args.prefix))
    # Tag the OS process with the experiment prefix for easy identification in ps/top.
    setproctitle.setproctitle(args.prefix)
    # NOTE(review): log files are opened but never closed in this fragment;
    # a context manager (or explicit close at exit) would be safer.
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    train_loader, test_loader = pblm.cifar_loaders(args.batch_size)
    # Seed every RNG source; torch seeds come from args.seed while the
    # python/numpy seeds are hard-coded to 0.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    random.seed(0)
    numpy.random.seed(0)
    sampler_indices = []
    # The cascade is a list of models, seeded with one freshly-built model.
    model = [select_model(args.model)]
    kwargs = pblm.args2kwargs(args)
    best_err = 1  # best (lowest) error observed so far; 1 == 100% error
    # One cascade stage per iteration; stages after the first retrain on a
    # reduced dataset (body truncated below).
    for _ in range(0, args.cascade):
        if _ > 0:
            # reduce dataset to just uncertified examples
# NOTE(review): this chunk begins mid-expression — the string literals below are
# the tail of a print(...) call whose opening parenthesis is outside the visible
# source (format slots {0}-{2} are filled by args.type/category/epochs).
      'Targeted epsilon: {3}\t\t'
      'Starting epsilon: {4}\t\t'
      # NOTE(review): 'Sechduled' is a typo for 'Scheduled' in a runtime output
      # string; left unchanged here because fixing it changes program output.
      'Sechduled length: {5}'.format(args.type, args.category, args.epochs,
                                     args.epsilon, args.starting_epsilon,
                                     args.schedule_length), end='\n')
if args.l1_proj is not None:
    # Report the l1 projection settings when projection estimation is enabled.
    print('Projection vectors: {0}\t\t'
          'Train estimate: {1}\t\t'
          'Test estimate: {2}'.format(args.l1_proj, args.l1_train, args.l1_test), end='\n')
# train-validation split
train_loader, _, _ = pblm.cifar_loaders(batch_size=args.batch_size, path='../data',
                                        ratio=args.ratio, seed=args.seed)
# Validation/test loaders use batch size 1 (per-example evaluation).
_, valid_loader, test_loader = pblm.cifar_loaders(batch_size=1, path='../data',
                                                  ratio=args.ratio, seed=args.seed)
model = select_model(args.model)
# NOTE(review): model[-1] assumes the model is an indexable container (e.g.
# nn.Sequential) whose last layer is a Linear classifier head — confirm
# against select_model.
num_classes = model[-1].out_features
# specify the task and the corresponding class semantic
folder_path = os.path.dirname(args.proctitle)
if args.type == 'binary':
    # Class-pair specification matrix, presumably marking (seed, target)
    # class pairs — TODO confirm how later code fills it.
    # NOTE(review): the np.int alias is removed in NumPy >= 1.24; use
    # plain int (or np.int_) when this can be changed.
    input_mat = np.zeros((num_classes, num_classes), dtype=np.int)
    if args.category == 'single_pair':
        # Single fixed class pair — presumably CIFAR-10 classes 3 (cat)
        # and 1 (automobile); verify against the dataset labels.
        seed_clas = 3
        targ_clas = 1
# Imports and module-level setup for CIFAR-10 model utilities.
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchsummary import summary
import problems as pblm
from trainer import *

# Fix all RNG sources at import time for reproducibility.
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
random.seed(0)
np.random.seed(0)

gthreshold = 0.5  # neuron coverage threshold
globalcoverage = []  # [{file, label, layercoverage, yhat}]
hook_layer_count = 0  # presumably a running count of hooked layers — confirm against hook-registration code
train_loader, test_loader = pblm.cifar_loaders(64)


def select_model(m):
    """Build a CUDA CIFAR model and print its torchsummary table.

    m selects the architecture: 'large' -> pblm.cifar_model_large,
    'resnet' -> pblm.cifar_model_resnet, anything else -> pblm.cifar_model.
    Returns the constructed model (already moved to CUDA).
    """
    if m == 'large':
        # raise ValueError
        model = pblm.cifar_model_large().cuda()
    elif m == 'resnet':
        # NOTE(review): depends on a global `args` defined elsewhere; calling
        # this branch before argument parsing raises NameError.
        model = pblm.cifar_model_resnet(N=args.resnet_N, factor=args.resnet_factor).cuda()
    else:
        model = pblm.cifar_model().cuda()
    # Print the layer/parameter summary for a 3x32x32 input.
    summary(model, (3, 32, 32))
    return model


def get_yhats_test():
    # NOTE(review): truncated — the rest of this function lies beyond the
    # visible source.
    model = select_model('large')
    model_name = 'model/cifar_large_robust_new.h5'
# NOTE(review): fragment of a script body — `args` is parsed outside the
# visible source, and the final custom_cifar_loaders call is truncated.
saved_filepath = ('./saved_log/' + args.proctitle)
model_filepath = os.path.dirname('./models/' + args.proctitle)
# Create the output directories on first run.
# NOTE(review): os.makedirs(..., exist_ok=True) would avoid the race between
# the exists() check and the makedirs() call.
if not os.path.exists(saved_filepath):
    os.makedirs(saved_filepath)
if not os.path.exists(model_filepath):
    os.makedirs(model_filepath)
model_path = ('./models/' + args.proctitle + '.pth')
# Result logs. NOTE(review): opened without close/context manager in this fragment.
train_res = open(saved_filepath + '/train_res.txt', "w")
test_res = open(saved_filepath + '/test_res.txt', "w")
# load the data
# NOTE(review): despite the "imagenet" prefixes, both branches use pblm
# CIFAR-style loaders — one reads ../data/cifar10, the other GAN-generated
# .npz files under ../imagenet_gen/.
if args.prefix == "imagenet":
    train_loader, _ = pblm.cifar_loaders(
        args.batch_size,
        '../data/cifar10',
    )
    _, test_loader = pblm.cifar_loaders(args.batch_size_test, '../data/cifar10')
elif args.prefix == "custom_imagenet":
    train_loader, _ = pblm.custom_cifar_loaders(
        batch_size=args.batch_size,
        train_path='../imagenet_gen/data/imagenet/' + args.gan_type + '/train.npz',
        test_path='../imagenet_gen/data/imagenet/' + args.gan_type + '/test.npz')
    # NOTE(review): call truncated here — remaining keyword arguments lie
    # beyond the visible source.
    _, test_loader = pblm.custom_cifar_loaders(
        batch_size=args.batch_size_test,
        train_path='../imagenet_gen/data/imagenet/' + args.gan_type + '/train.npz',
# NOTE(review): `return model` is the tail of a function whose `def` line is
# outside the visible source.
    return model


# Fix every RNG source for reproducible evaluation.
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
random.seed(0)
numpy.random.seed(0)

# Entry point: evaluate a saved cascade of robust models on CIFAR-10.
if __name__ == "__main__":
    args = pblm.argparser_evaluate(epsilon = 0.0347, norm='l1')
    print("saving file to {}".format(args.output))
    # Tag the OS process with the output name for easy identification.
    setproctitle.setproctitle(args.output)
    # NOTE(review): opened without close/context manager in this fragment.
    test_log = open(args.output, "w")
    # Evaluate one example at a time (batch size 1).
    _, test_loader = pblm.cifar_loaders(1)
    # The checkpoint stores one state_dict per cascade stage; rebuild each
    # stage's model and load its weights.
    d = torch.load(args.load)
    model = []
    for sd in d['state_dict']:
        m = select_model(args.model)
        m.load_state_dict(sd)
        model.append(m)
    best_err = 1  # best (lowest) error seen so far; 1 == 100% error
    epsilon = args.epsilon
    # robust cascade training
    # NOTE(review): truncated — the evaluate_robust_cascade(...) call continues
    # beyond the visible source.
    err = evaluate_robust_cascade(test_loader, model,
# NOTE(review): this chunk begins mid-call — `lr=0.05)` closes an
# argparser(...) invocation opened outside the visible source, and `method`
# is bound earlier (this fragment appears to run once per training method).
                      lr=0.05)
# Override the parsed arguments with fixed settings for this comparison run.
args.method = method
args.batch_size = 50
args.test_batch_size = 20
# NOTE(review): the trailing + "" is a no-op string concatenation.
args.prefix = "test3/" + args.method + ""
args.epochs = 50
print(args)
print("saving file to {}".format(args.prefix))
# Tag the OS process with the experiment prefix.
setproctitle.setproctitle(args.prefix)
# One log per evaluation regime: baseline, madry, robust, and full.
# NOTE(review): none of these files are closed in this fragment.
train_log = open(args.prefix + "_train.log", "w")
baseline_test_log = open(args.prefix + "_baseline_test.log", "w")
madry_test_log = open(args.prefix + "_madry_test.log", "w")
robust_test_log = open(args.prefix + "_robust_test.log", "w")
full_test_log = open(args.prefix + "_full_test.log", "w")
train_loader, _ = pblm.cifar_loaders(args.batch_size)
# For robust training since it requires more memory.
# This may be unfair
train_loader2, _ = pblm.cifar_loaders(20)
_, test_loader = pblm.cifar_loaders(args.test_batch_size)
# Seed every RNG source; torch from args.seed, python/numpy hard-coded to 0.
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
random.seed(0)
numpy.random.seed(0)
sampler_indices = []
# Cascade list seeded with one freshly-built model.
model = [select_model(args.model)]
kwargs = pblm.args2kwargs(args)
best_err = 1  # best (lowest) error seen so far; 1 == 100% error