Example #1
0
    # Report the best validation accuracy seen during training.
    # NOTE(review): '{:4f}' is fixed-point with *minimum width* 4 (default 6
    # decimal places); '{:.4f}' (4 decimal places) was probably intended —
    # confirm with the author. Left unchanged here.
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    # Restore the snapshot taken at the best-accuracy epoch so the caller
    # receives the best model, not the weights from the final epoch.
    model.load_state_dict(best_model_wts)
    return model


if __name__ == "__main__":
    # Entry point: adversarially (PGD) train a Net on the LISA traffic-sign
    # data. Seed the RNG first so runs are reproducible.
    torch.manual_seed(123456)
    dataloaders, dataset_sizes = data_process_lisa(batch_size=128)

    # Use the first CUDA device when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = Net()
    model_ft.apply(weights_init)  # custom weight init (defined elsewhere in the project)
    # Uncomment to resume training from a previously saved checkpoint:
    #model_ft.load_state_dict(torch.load('../donemodel/'+args.model))
    model_ft.to(device)

    # model_ft = nn.DataParallel(model,device_ids=[0,1])
    # use multiple gpus

    criterion = nn.CrossEntropyLoss()

    optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.01)

    # Decay LR by a factor of 0.1 every 7 epochs
    # NOTE(review): the comment above says every 7 epochs, but step_size=10
    # means the decay actually happens every 10 epochs — reconcile with the
    # author which value is intended.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=10,
                                           gamma=0.1)

    model_ft = pgd_train_model(model_ft,
                               criterion,
Example #2
0
    # Return the perturbation found by the attack — presumably the delta that
    # maximized the loss across the PGD iterations/restarts above (the loop is
    # cut off in this view; confirm against the full function).
    return max_delta


if __name__ == "__main__":
    # Entry point: evaluate a trained Net checkpoint under PGD attacks at a
    # sweep of L2 perturbation budgets.
    parser = argparse.ArgumentParser(description='test')
    parser.add_argument("model", type=str, help="test_model")
    args = parser.parse_args()
    model = Net()
    # Load the checkpoint named on the command line from ../donemodel/.
    model.load_state_dict(torch.load('../donemodel/' + args.model))

    print("test model is ", args.model)
    model.eval()  # inference mode: freezes dropout / batch-norm statistics
    batch_size = 1
    dataloaders, dataset_sizes = data_process_lisa(batch_size)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # Attack hyperparameters — the four lists line up index-by-index, one
    # entry per experiment:
    eps = [0.5, 1, 1.5, 2, 2.5, 3]  # eps is epsilon of the l_2 bound
    alpha = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]  # alpha is learning rate
    itera = [20, 20, 20, 20, 20, 20]  # iterations to find optimal
    restart = [
        1, 1, 1, 1, 1, 1
    ]  # restart times; since we just do some standard check of our model,
    # we do not use multiple restarts, but you can change that if you want
    # (deleting some hyperparameters could speed things up)

    # One evaluation pass per epsilon; reseed before each so every setting
    # sees identical randomness and results are comparable across budgets.
    for i in range(len(eps)):
        correct = 0
        total = 0
        check = 0
        torch.manual_seed(12345)