        in_bin = conf_lst.gt(bin_lower.item()) * conf_lst.le(bin_upper.item())
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            # calibration gap of this bin, weighted by the fraction of samples in it
            accuracy_in_bin = acc_lst[in_bin].float().mean()
            avg_confidence_in_bin = conf_lst[in_bin].mean()
            ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
    return ece.item()


if __name__ == "__main__":
    D_t_loader, D_s_t_loader, D_s_u_loader, D_u_loader, probe_imgs, probe_labels, test_loader = load_data()
    net_shadow, net_target = models.SimpleCNN(n_classes), models.SimpleCNN(n_classes)
    opt_shadow = torch.optim.SGD(net_shadow.parameters(), lr=lr, momentum=momentum)
    opt_target = torch.optim.SGD(net_target.parameters(), lr=lr, momentum=momentum)
    net_target = nn.DataParallel(net_target).cuda()
    net_shadow = nn.DataParallel(net_shadow).cuda()
    loss = nn.CrossEntropyLoss().cuda()
    '''
    best_model = train(net_target, D_t_loader, test_loader, opt_target, loss, n_epochs)
    torch.save({'state_dict': best_model.state_dict()},
               os.path.join(save_path, "SimpleCNN_{}_target.tar".format(model_v)))
    best_model = train(net_shadow, D_s_t_loader, test_loader, opt_shadow, loss, n_epochs)
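# `train` is invoked above but not defined in this fragment. Below is a minimal
# sketch consistent with the call signature
# train(net, train_loader, test_loader, optimizer, loss_fn, n_epochs) -> best model;
# every detail (the epoch loop, accuracy-based selection, the helper `evaluate`)
# is an assumption, not the original implementation.
import copy

import torch


def evaluate(net, loader):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.cuda(), y.cuda()
            correct += (net(x).argmax(dim=1) == y).sum().item()
            total += y.size(0)
    return correct / total


def train(net, train_loader, test_loader, optimizer, loss_fn, n_epochs):
    best_acc, best_model = -1.0, None
    for epoch in range(n_epochs):
        net.train()
        for x, y in train_loader:
            x, y = x.cuda(), y.cuda()
            optimizer.zero_grad()
            loss_fn(net(x), y).backward()
            optimizer.step()
        # keep a deep copy of the checkpoint that does best on the held-out loader
        acc = evaluate(net, test_loader)
        if acc > best_acc:
            best_acc, best_model = acc, copy.deepcopy(net)
    return best_model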
        in_bin = conf_lst.gt(bin_lower.item()) * conf_lst.le(bin_upper.item())
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            accuracy_in_bin = acc_lst[in_bin].float().mean()
            avg_confidence_in_bin = conf_lst[in_bin].mean()
            ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
    return ece.item()


if __name__ == "__main__":
    D_t_loader, D_s_t_loader, D_s_u_loader, D_u_loader, probe_imgs, probe_labels, test_loader = load_data(
        dataset, batch_size=batch_size)
    if dataset == "CIFAR10":
        net_shadow = models.SimpleCNN(n_classes)
        net_target = models.SimpleCNN(n_classes)
    else:
        net_shadow = models.MCNN(n_classes)
        net_target = models.MCNN(n_classes)
    opt_shadow = torch.optim.SGD(net_shadow.parameters(), lr=lr, momentum=momentum)
    opt_target = torch.optim.SGD(net_target.parameters(), lr=lr, momentum=momentum)
    net_shadow = nn.DataParallel(net_shadow).cuda()
    net_target = nn.DataParallel(net_target).cuda()
    loss = nn.CrossEntropyLoss(reduction='none').cuda()
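# For context: the bin loop above is the tail of a standard expected-calibration-error
# (ECE) computation. A minimal self-contained sketch of the full routine, assuming
# `conf_lst` holds per-sample confidences in [0, 1] and `acc_lst` per-sample 0/1
# correctness; the function name and the n_bins default are illustrative, not from
# the original file.
import torch


def compute_ece(conf_lst, acc_lst, n_bins=15):
    bin_boundaries = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(1)
    for bin_lower, bin_upper in zip(bin_boundaries[:-1], bin_boundaries[1:]):
        # samples whose confidence falls in (bin_lower, bin_upper]
        in_bin = conf_lst.gt(bin_lower.item()) * conf_lst.le(bin_upper.item())
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            accuracy_in_bin = acc_lst[in_bin].float().mean()
            avg_confidence_in_bin = conf_lst[in_bin].mean()
            # weighted |confidence - accuracy| gap contributed by this bin
            ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
    return ece.item()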
    attack_acc = 0.0
    # average the attack accuracy over 10 training rounds
    for i in range(10):
        train_attack(attack_net, train_data, train_label, test_data, test_label,
                     optimizer, criterion)
        attack_acc += eval_attack(attack_net, test_data, test_label)
    attack_acc /= 10
    print("Attack acc:{:.2f}".format(attack_acc))
    return attack_acc


if __name__ == "__main__":
    # "vib" model versions load the VIB variant of the architecture
    if model_v.startswith("vib"):
        shadow_net = models.SimpleCNN_VIB(n_classes)
    else:
        shadow_net = models.SimpleCNN(n_classes)
    shadow_net = nn.DataParallel(shadow_net).cuda()
    shadow_ckp = torch.load(shadow_path)['state_dict']
    load_state_dict(shadow_net, shadow_ckp)

    if model_v.startswith("vib"):
        target_net = models.SimpleCNN_VIB(n_classes)
    else:
        target_net = models.SimpleCNN(n_classes)
    target_net = nn.DataParallel(target_net).cuda()
    target_ckp = torch.load(target_path)['state_dict']
    load_state_dict(target_net, target_ckp)

    criterion = nn.CrossEntropyLoss().cuda()
    __, __, D_s_u_loader, D_u_loader, probe_imgs, probe_labels, __ = load_data(
        batch_size=batch_size)
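# The free function `load_state_dict(net, ckp)` called above is presumably a small
# helper rather than the nn.Module method. A plausible sketch (an assumption, not the
# original implementation) that tolerates the 'module.' prefix nn.DataParallel adds
# to parameter names, so checkpoints load whether or not they were saved wrapped:
def load_state_dict(net, ckp):
    model_keys = set(net.state_dict().keys())
    fixed = {}
    for k, v in ckp.items():
        if k in model_keys:
            fixed[k] = v
        elif 'module.' + k in model_keys:
            # checkpoint was saved without DataParallel, model is wrapped
            fixed['module.' + k] = v
        elif k.startswith('module.') and k[len('module.'):] in model_keys:
            # checkpoint was saved with DataParallel, model is unwrapped
            fixed[k[len('module.'):]] = v
    net.load_state_dict(fixed)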
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='IP', type=str, help='Dataset')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--epochs', default=600, type=int, help='Number of total epochs')
    parser.add_argument('--components', default=1, type=int, help='Dimensionality reduction')
    parser.add_argument('--spatialsize', default=23, type=int, help='Patch size')
    parser.add_argument('--tr_percent', default=0.10, type=float, metavar='N', help='Train set size')
    parser.add_argument('--val_percent', default=0.1, type=float, metavar='N', help='Validation set size')
    parser.add_argument('--tr_bsize', default=100, type=int, metavar='N', help='Train batch size')
    parser.add_argument('--val_bsize', default=5000, type=int, metavar='N', help='Validation batch size')
    parser.add_argument('--te_bsize', default=5000, type=int, metavar='N', help='Test batch size')
    parser.add_argument("--verbose", action='store_true', help="Verbose? Default NO")
    parser.add_argument("--use_val", action='store_true', help="Validation? Default NO")
    parser.add_argument('--p', default=0, type=float, help='Occlusion probability')
    parser.add_argument('--sh', default=0.3, type=float, help='Max occlusion area')
    parser.add_argument('--r1', default=0.2, type=float, help='Aspect ratio of occlusion area')
    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    trainloader, valloader, testloader, num_classes, bands = load_hyper(args)

    # Use CUDA
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.backends.cudnn.benchmark = True

    model = models.SimpleCNN(bands, args.spatialsize, num_classes)
    if use_cuda:
        model = model.cuda()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[300, 400, 500], gamma=0.5)

    best_acc = -1
    for epoch in range(args.epochs):
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        # model selection on the validation set if --use_val, otherwise on the test set
        if args.use_val:
            val_loss, val_acc = test(valloader, model, criterion, epoch, use_cuda)
        else:
            val_loss, val_acc = test(testloader, model, criterion, epoch, use_cuda)
        if args.verbose:
            print("EPOCH [" + str(epoch) + "/" + str(args.epochs) + "] TRAIN LOSS", train_loss,
                  "TRAIN ACCURACY", train_acc,
                  "LOSS", val_loss, "ACCURACY", val_acc)
        # save the best model so far
        if val_acc > best_acc:
            state = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': val_acc,
                'best_acc': val_acc,  # store the new best, not the stale previous value
                'optimizer': optimizer.state_dict(),
            }
            torch.save(state, "best_model" + str(args.p) + ".pth.tar")
            best_acc = val_acc
        scheduler.step()

    # reload the best checkpoint and report final test metrics
    checkpoint = torch.load("best_model" + str(args.p) + ".pth.tar")
    best_acc = checkpoint['best_acc']
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
    if args.verbose:
        print("FINAL: LOSS", test_loss, "ACCURACY", test_acc)
    classification, confusion, results = auxil.reports(
        np.argmax(predict(testloader, model, criterion, use_cuda), axis=1),
        np.array(testloader.dataset.__labels__()), args.dataset)
    print(args.dataset, results)
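# Example invocation (the script name `main.py` is an assumption; the flag names come
# from the parser above, and 'IP' is simply the parser's default dataset code):
#   python main.py --dataset IP --tr_percent 0.10 --spatialsize 23 --p 0.5 --use_val --verbose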
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='IP', type=str, help='the path to your dataset')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate of the Adam optimizer')
    parser.add_argument('--epochs', default=600, type=int, help='number of total epochs')
    parser.add_argument('--batch_size', default=100, type=int, help='batch size')
    parser.add_argument('--components', default=1, type=int, help='dimensionality reduction')
    parser.add_argument('--spatialsize', default=23, type=int, help='window size')
    parser.add_argument('--tr_percent', default=0.10, type=float, metavar='N', help='train set size')
    parser.add_argument('--tr_bsize', default=100, type=int, metavar='N', help='train batch size')
    parser.add_argument('--te_bsize', default=5000, type=int, metavar='N', help='test batch size')
    parser.add_argument('--p', default=0, type=float, help='Random Erasing probability')
    parser.add_argument('--sh', default=0.3, type=float, help='max erasing area')
    parser.add_argument('--r1', default=0.2, type=float, help='aspect ratio of erasing area')
    parser.add_argument("--verbose", action='store_true', help="Verbose? Default NO")
    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    trainloader, testloader, num_classes, bands = load_hyper(args)

    # Use CUDA
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.backends.cudnn.benchmark = True

    model = models.SimpleCNN(bands, args.spatialsize, num_classes)
    if use_cuda:
        model = model.cuda()
    criterion = torch.nn.CrossEntropyLoss()
    # pass the learning rate explicitly; the original left Adam at its default,
    # so --learning_rate had no effect
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    title = 'HYPER-' + args.dataset
    best_acc = -1
    for epoch in range(args.epochs):
        # adjust_learning_rate(optimizer, epoch)
        if args.verbose:
            # read the rate from args: `state` is reused below for the checkpoint dict,
            # and the original's state['lr'] key never existed
            print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, args.learning_rate))
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        if args.verbose:
            print("EPOCH", epoch, "TRAIN LOSS", train_loss, "TRAIN ACCURACY", train_acc, end=',')
            print("LOSS", test_loss, "ACCURACY", test_acc)
        # save the best model so far
        if test_acc > best_acc:
            state = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': test_acc,  # store the new best, not the stale previous value
                'optimizer': optimizer.state_dict(),
            }
            torch.save(state, "best_model" + str(args.p) + ".pth.tar")
            best_acc = test_acc

    # reload the best checkpoint and report final test metrics
    checkpoint = torch.load("best_model" + str(args.p) + ".pth.tar")
    best_acc = checkpoint['best_acc']
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
    if args.verbose:
        print("FINAL: LOSS", test_loss, "ACCURACY", test_acc)
    classification, confusion, results = auxil.reports(
        np.argmax(predict(testloader, model, criterion, use_cuda), axis=1),
        np.array(testloader.dataset.__labels__()), args.dataset)
    print(args.dataset, results)
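# `predict` is not shown in this fragment. A minimal sketch consistent with the call
# above, which expects a [num_samples, num_classes] score array so that
# np.argmax(..., axis=1) yields predicted labels; the body below is an assumption,
# not the original implementation.
import numpy as np
import torch


def predict(loader, model, criterion, use_cuda):
    model.eval()
    outputs = []
    with torch.no_grad():
        for inputs, _ in loader:
            if use_cuda:
                inputs = inputs.cuda()
            # collect raw class scores per batch and stack them into one array
            outputs.append(model(inputs).cpu().numpy())
    return np.concatenate(outputs)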