def code_test():
    """Smoke-test: restore a checkpointed ResNet-18 and run the robustness
    evaluation over one epoch of the validation set.

    Side effects: moves the model to GPU; delegates all reporting to
    ``evalRoboustness`` (defined elsewhere in this file).
    """
    from base_model.cifar_resnet18 import cifar_resnet18
    from dataset import Dataset
    from my_snip.base import EpochDataset

    # Build the validation-set epoch iterator.
    dataset = Dataset(dataset_name='val')
    dataset.load()
    dataset = EpochDataset(dataset)

    # Restore the trained network from its checkpoint.
    ckpt_path = '../exps/exp0/checkpoint.pth.tar'
    model = cifar_resnet18()
    ckpt = torch.load(ckpt_path)
    model.load_state_dict(ckpt['state_dict'])
    model.cuda()

    # One full pass over the validation data.
    first_epoch = next(dataset.epoch_generator())
    evalRoboustness(model, first_epoch)
def test_IPGD():
    """Measure batch accuracy of a checkpointed ResNet-18 on the first 100
    training mini-batches, via the IPGD attack object's accuracy helper.

    Side effects: moves model and tensors to GPU; prints per-batch accuracy.
    """
    model_path = './exps/exp0/model_best.pth.tar'
    from base_model.cifar_resnet18 import cifar_resnet18
    from dataset import Dataset
    from my_snip.base import EpochDataset
    from itertools import islice

    pgd = IPGD()

    # Restore the best checkpoint.
    net = cifar_resnet18()
    net.load_state_dict(torch.load(model_path)['state_dict'])
    net.cuda()

    # Build the training-set epoch iterator.
    ds_train = Dataset(dataset_name='train')
    ds_train.load()
    ds_train = EpochDataset(ds_train)
    epoch = next(ds_train.epoch_generator())

    # islice replaces the original manual count/break idiom: exactly the
    # first 100 mini-batches are processed, same as before.
    for mn_batch in islice(epoch, 100):
        # Batches arrive as raw arrays; convert once and move to GPU.
        data = torch.tensor(mn_batch['data'], dtype=torch.float32).cuda()
        label = torch.tensor(mn_batch['label'], dtype=torch.int64).cuda()
        acc = pgd.get_batch_accuracy(net, data, label)
        print('acc: {:.2f}%'.format(acc))
[100, 0.01], [100, 0.001] ] get_learing_rate = MultiStageLearningRatePolicy(learning_rate_policy) def adjust_learning_rate(optimzier, epoch): #global get_lea lr = get_learing_rate(epoch) for param_group in optimizer.param_groups: param_group['lr'] = lr torch.backends.cudnn.benchmark = True ds_train = Dataset(dataset_name = 'train') ds_train.load() ds_train = EpochDataset(ds_train) ds_val = Dataset(dataset_name = 'val') ds_val.load() ds_val = EpochDataset(ds_val) net = cifar_resnet18() net.cuda() criterion = nn.CrossEntropyLoss().cuda() optimizer = optim.SGD(net.parameters(), lr = get_learing_rate(0), momentum = 0.9, weight_decay=args.weight_decay) PgdAttack = IPGD(eps = args.eps, sigma = args.eps // 2, nb_iter = args.iter, norm = np.inf) best_prec = 0.0 if args.resume: