# ---- Example #1 ----
if __name__ == "__main__":
    # Script entry point: parse command-line options, build the data
    # pipeline and a VGG-16 model, then set up the loss and optimizer for
    # adversarial training.  The training loop itself continues past the
    # end of this chunk.
    parser = argparse.ArgumentParser(description='ori_model')
    # Positional argument: checkpoint filename of the original model
    # (used by the commented-out curriculum-training load path below).
    parser.add_argument("model", type=str, help="ori_model")
    # NOTE(review): eps/alpha look like attack hyper-parameters
    # (perturbation bound / step size) but are parsed as int — confirm
    # fractional values are never needed here.
    parser.add_argument("-eps", type=int, help="eps")
    parser.add_argument("-alpha", type=int, help="alpha")
    parser.add_argument("-iters", type=int, help="iters")
    parser.add_argument("-out", type=int, help="out")
    parser.add_argument("-epochs", type=int, help="epochs")
    args = parser.parse_args()

    # Fixed seed for reproducible runs.
    torch.manual_seed(123456)
    dataloaders, dataset_sizes = data_process(batch_size=64)

    # Prefer the first GPU when available, else fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = VGG_16()
    model_ft.load_weights()  #use this to do the adversarial training
    #model_ft.load_state_dict(torch.load('../donemodel/'+args.model))
    #use this to do the curriculum adversarial training

    model_ft.to(device)

    # model_ft = nn.DataParallel(model,device_ids=[0,1])

    # Echo the run configuration so logs identify the experiment.
    print("eps is ", args.eps, "  ", "alpha is ", args.alpha, "  ",
          "iteration is ", args.iters, "  ", "out is", args.out, "  ",
          "epochs is ", args.epochs)

    criterion = nn.CrossEntropyLoss()

    # Adam with a small fixed learning rate; no LR scheduler is visible
    # in this chunk.
    optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.0001)
# ---- Example #2 ----
    # ---- Certification / evaluation setup (fragment) ----
    # `parser` is created earlier in the file; this span adds the sampling
    # options, loads a smoothed VGG-16 classifier, and prepares the grid
    # of attack hyper-parameters evaluated by the loop at the bottom.
    parser.add_argument("--N",
                        type=int,
                        default=1000,
                        help="number of samples to use")
    parser.add_argument("--alpha",
                        type=float,
                        default=0.001,
                        help="failure probability")
    args = parser.parse_args()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Results go to a tab-separated text file; flush immediately so the
    # file stays readable while the (long-running) evaluation proceeds.
    # NOTE(review): args.outfile / args.model / args.sigma are parsed
    # outside this chunk — verify they exist on `args`.
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)
    batch_size = 1
    dataloaders, dataset_sizes = data_process(batch_size)
    model = VGG_16()
    model.load_state_dict(torch.load('../donemodel/' + args.model))
    model.to(device)
    # Smooth wraps the base classifier; 10 is presumably the class count
    # and args.sigma the smoothing noise level — TODO confirm against the
    # Smooth constructor.
    smoothed_classifier = Smooth(model, 10, args.sigma)

    eps = [0.5, 1, 1.5, 2, 2.5, 3]  # eps is epsilon of the l_2 bound
    alpha = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]  # alpha is learning rate
    itera = [20, 20, 20, 20, 20, 20]  # iterations to find optimal
    restart = [
        1, 1, 1, 1, 1, 1
    ]  # restart times, since we just do some standard check of our model,
    # we do not use multiple restarts, but you can change that if you want
    # deleting some hyperparameters could speed up

    # Evaluate each (eps, alpha, iters, restarts) setting in turn; `cor`
    # accumulates correct predictions (loop body continues past this chunk).
    for i in range(len(eps)):
        cor = 0
# ---- Example #3 ----
        # ---- Adversarial-patch attack setup (fragment) ----
        # `opt` is parsed earlier in the file.  This span builds the target
        # VGG-16 classifier and the training-data pipeline; the transform
        # chain is cut off at the end of the chunk.
        #patch_size = opt.patch_size
        image_size = opt.image_size
        plot_all = opt.plot_all

        print("=> creating model ")
        # Metadata describing the classifier's expected input.
        # NOTE(review): mean in [0, 1] with std = 1/255 implies the
        # pipeline feeds roughly [0, 255]-scaled tensors after Normalize —
        # confirm this matches VGG_16's training preprocessing.  This dict
        # is not consumed anywhere in the visible chunk.
        netClassifier_par = {
            'input_size': [3, 224, 224],
            'input_range': [0, 255],
            'mean':
            [0.367035294117647, 0.41083294117647057, 0.5066129411764705],
            'std': [1 / 255, 1 / 255, 1 / 255],
            'num_classes': 10,
            'input_space': "RGB"
        }

        netClassifier = VGG_16()
        netClassifier.load_state_dict(torch.load('../donemodel/' + opt.model))

        if torch.cuda.is_available():
            netClassifier.cuda()

        print('==> Preparing data..')
        data_dir = '../Data'
        # Same mean/std as netClassifier_par above, applied to the loader.
        normalize = transforms.Normalize(
            mean=[0.367035294117647, 0.41083294117647057, 0.5066129411764705],
            std=[1 / 255, 1 / 255, 1 / 255])

        # Patch-training split; transform chain continues past this chunk.
        train_loader = torch.utils.data.DataLoader(dset.ImageFolder(
            os.path.join(data_dir, 'Train_patch'),
            transforms.Compose([
                transforms.Resize(