Example #1
def trainModelLoss(dro_type,
                   epochs,
                   steps_adv,
                   budget,
                   activation,
                   batch_size,
                   loss_criterion,
                   cost_function=None):
    """
    Train a neural network with a specified loss function.
    """

    model = MNISTClassifier(activation=activation)
    if dro_type == 'PGD':
        train_module = ProjetcedDRO(model, loss_criterion)
    elif dro_type == 'Lag':
        assert cost_function is not None
        train_module = LagrangianDRO(model, loss_criterion, cost_function)
    elif dro_type == 'FW':
        train_module = FrankWolfeDRO(model, loss_criterion, p=2, q=2)
    else:
        raise ValueError("The type of DRO is not valid.")

    train_module.train(budget=budget,
                       batch_size=batch_size,
                       epochs=epochs,
                       steps_adv=steps_adv)
    folderpath = "./Loss_models/"
    filepath = folderpath + "{}_DRO_activation={}_epsilon={}_loss={}.pt".format(
        dro_type, activation, budget, loss_criterion.__name__)
    torch.save(model.state_dict(), filepath)
    print("A neural network adversarially trained using {} now saved at: {}".
          format(dro_type, filepath))
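A possible call, shown here only as a hedged usage sketch: the concrete loss f_1 (one of the f_1 through f_7 names that the analyzer code further below expects) and the squared-L2 transport cost are illustrative stand-ins, not part of the original code.

import torch.nn.functional as F

def f_1(outputs, labels):
    # Illustrative loss criterion; trainModelLoss only requires a callable
    # whose __name__ is used when building the checkpoint file name.
    return F.cross_entropy(outputs, labels)

def squared_l2_cost(x, x_adv):
    # Illustrative transport cost for the Lagrangian ('Lag'/WRM) variant.
    return ((x - x_adv) ** 2).sum()

# Assumes the ./Loss_models/ directory already exists.
trainModelLoss(dro_type='Lag', epochs=25, steps_adv=15, budget=1.0,
               activation='relu', batch_size=128,
               loss_criterion=f_1, cost_function=squared_l2_cost)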
Example #2
def trainDROModel(dro_type, epochs, steps_adv, budget, activation, batch_size, loss_criterion, cost_function=None):
    """
    Train a neural network using one of the following DRO methods:
        - PGD
        - the Lagrangian relaxation-based method developed by Sinha et al.,
            also known as WRM
        - the Frank-Wolfe-based approach developed by Staib et al.
    """

    model = MNISTClassifier(activation=activation)
    if dro_type == 'PGD':
        train_module = ProjetcedDRO(model, loss_criterion)
    elif dro_type == 'Lag':
        assert cost_function is not None
        train_module = LagrangianDRO(model, loss_criterion, cost_function)
    elif dro_type == 'FW':
        train_module = FrankWolfeDRO(model, loss_criterion, p=2, q=2)
    else:
        raise ValueError("The type of DRO is not valid.")

    train_module.train(budget=budget, batch_size=batch_size,
                       epochs=epochs, steps_adv=steps_adv)
    folderpath = "./DRO_models/"
    filepath = folderpath + \
        "{}_DRO_activation={}_epsilon={}.pt".format(
            dro_type, activation, budget)
    torch.save(model.state_dict(), filepath)
    print("A neural network adversarially trained using {} is now saved at {}.".format(
        dro_type, filepath))
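As with the previous example, a hedged usage sketch: the budget values, hyperparameters, and the cross-entropy criterion below are illustrative assumptions rather than part of the original script.

import torch.nn.functional as F

def cross_entropy(outputs, labels):
    # Illustrative loss criterion forwarded to the training module.
    return F.cross_entropy(outputs, labels)

# Train Frank-Wolfe DRO models for several perturbation budgets; each budget
# is formatted into the checkpoint name as epsilon.
for budget in [0.5, 1.0, 2.0]:
    trainDROModel(dro_type='FW', epochs=25, steps_adv=15, budget=budget,
                  activation='elu', batch_size=128,
                  loss_criterion=cross_entropy)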
Example #3
def initializeAnalyzers(dro_type, activation, budget):
    analyzers = []
    folderpath = "./Loss_models/"
    for i in range(1, 8):
        filepath = folderpath + "{}_DRO_activation={}_epsilon={}_loss={}.pt".format(
            dro_type, activation, budget, "f_{}".format(i))
        model = MNISTClassifier(activation=activation)
        analyzers.append(Analysis(model, filepath))
    return analyzers
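A hedged usage sketch for the loop above; it assumes the seven checkpoints for losses f_1 through f_7 have already been produced by trainModelLoss.

# Collect one Analysis object per loss variant of a PGD-trained model.
analyzers = initializeAnalyzers(dro_type='PGD', activation='relu', budget=1.0)
for i, analyzer in enumerate(analyzers, start=1):
    print("Loaded analyzer for loss f_{}".format(i))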
Example #4
        def initializeAnalyzers(dro_type, epsilon):
            """
            Initialize Analysis objects for neural networks trained by the
            Frank-Wolfe method and PGD
            """

            folderpath = "./DRO_models/"
            filepath_relu = folderpath + \
                "{}_DRO_activation={}_epsilon={}.pt".format(
                    dro_type, "relu", epsilon)
            filepath_elu = folderpath + \
                "{}_DRO_activation={}_epsilon={}.pt".format(
                    dro_type, "elu", epsilon)
            model_relu = MNISTClassifier(activation='relu')
            model_elu = MNISTClassifier(activation='elu')
            analyzer_relu = Analysis(model_relu, filepath_relu)
            analyzer_elu = Analysis(model_elu, filepath_elu)
            return analyzer_relu, analyzer_elu
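A hedged usage sketch; the epsilon value is illustrative and the two checkpoints are assumed to already exist in ./DRO_models/.

# Load the ReLU and ELU variants of a Frank-Wolfe-trained model.
analyzer_relu, analyzer_elu = initializeAnalyzers(dro_type='FW', epsilon=1.0)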
Example #5
    def __init__(self):
        model_relu = MNISTClassifier(activation='relu')
        model_elu = MNISTClassifier(activation='elu')
        model_sgd_relu = MNISTClassifier(activation='relu')
        model_sgd_elu = MNISTClassifier(activation='elu')

        # These file paths only work on UNIX.
        folderpath = "./ERM_models/"
        filename_relu = "MNISTClassifier_adam_relu.pt"
        filename_elu = "MNISTClassifier_adam_elu.pt"
        filename_sgd_relu = "MNISTClassifier_sgd_relu.pt"
        filename_sgd_elu = "MNISTClassifier_sgd_elu.pt"

        self.analyzer_relu = Analysis(model_relu, folderpath + filename_relu)
        self.analyzer_elu = Analysis(model_elu, folderpath + filename_elu)
        self.analyzer_sgd_relu = Analysis(model_sgd_relu,
                                          folderpath + filename_sgd_relu)
        self.analyzer_sgd_elu = Analysis(model_sgd_elu,
                                         folderpath + filename_sgd_elu)
Example #6
        def initializeLagAnalyzers():
            """
            Initialize Analysis objects for neural networks trained by the DRO
            algorithm proposed by Sinha et al.
            """

            folderpath = "./DRO_models/"
            Lag_relu_analyzers = []
            Lag_elu_analyzers = []
            length = len(self.gammas)
            for i in range(length):
                gamma = self.gammas[i]
                filepath_relu = folderpath + \
                    "{}_DRO_activation={}_epsilon={}.pt".format(
                        "Lag", "relu", gamma)
                filepath_elu = folderpath + \
                    "{}_DRO_activation={}_epsilon={}.pt".format(
                        "Lag", "elu", gamma)
                model_relu = MNISTClassifier(activation='relu')
                model_elu = MNISTClassifier(activation='elu')
                Lag_relu_analyzers.append(Analysis(model_relu, filepath_relu))
                Lag_elu_analyzers.append(Analysis(model_elu, filepath_elu))
            return Lag_relu_analyzers, Lag_elu_analyzers
Example #7
        if final_pred.item() == target.item():
            correct += 1

    # Calculate final accuracy for this epsilon
    final_acc = correct / float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(
        epsilon, correct, len(test_loader), final_acc))


if __name__ == "__main__":
    # MNIST Test dataset and dataloader declaration
    test_loader = retrieveMNISTTestData(batch_size=1, shuffle=True)

    # Define what device we are using
    print("CUDA Available: ", torch.cuda.is_available())
    device = torch.device("cuda" if (
        use_cuda and torch.cuda.is_available()) else "cpu")

    # Initialize the network
    filepath_relu = "./experiment_models/MNISTClassifier_relu.pt"
    model_relu = MNISTClassifier(activation='relu')
    model_relu = loadModel(model_relu, filepath_relu)
    model_relu.to(device)

    # Set the model to evaluation mode; this matters for the Dropout layers.
    model_relu.eval()

    # Run test for each epsilon
    for eps in epsilons:
        test(model_relu, device, test_loader, eps)
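The evaluation loop above appears to follow the standard FGSM test pattern (note that `use_cuda` and `epsilons` are assumed to be defined earlier in the original script). The attack itself is not shown; a minimal single-step FGSM perturbation, included here only as an assumption about what `test` applies, could look like this:

import torch

def fgsm_attack(image, epsilon, data_grad):
    # Single-step FGSM: move each pixel by epsilon in the direction of the
    # sign of the loss gradient, then clamp back to the valid [0, 1] range.
    perturbed_image = image + epsilon * data_grad.sign()
    return torch.clamp(perturbed_image, 0, 1)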
Example #8
                         epsilon=max_epsilon,
                         stepsize=max_epsilon / 5,
                         iterations=15)

        total += 1
        if image_adv is not None:
            wrong += 1
        if i % period == period - 1:
            print("Cumulative adversarial attack success rate: {} / {} = {}".
                  format(wrong, total, wrong / total))
    print("Adversarial error rate: {} / {} = {}".format(
        wrong, total, wrong / total))


if __name__ == "__main__":
    model_relu = MNISTClassifier(activation='relu')
    model_elu = MNISTClassifier(activation='elu')

    # These file paths only work on UNIX.
    filepath_relu = "./ERM_models/MNISTClassifier_relu.pt"
    filepath_elu = "./ERM_models/MNISTClassifier_elu.pt"
    model_relu = loadModel(model_relu, filepath_relu)
    model_elu = loadModel(model_elu, filepath_elu)

    # Display the architecture of the neural network
    #summary(model_relu.cuda(), (1, 28, 28))

    print("The result of relu is as follows.")
    adversarialAccuracy(model_relu)
    print("The result of elu is as follows.")
    adversarialAccuracy(model_elu)
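The attack call at the top of this example (an epsilon budget, stepsize = epsilon / 5, a fixed number of iterations, and a None return when the attack fails) suggests an iterative projected-gradient attack. Below is a minimal sketch under that assumption; the function name and its details are illustrative, not the repository's implementation.

import torch
import torch.nn.functional as F

def pgd_attack(model, image, label, epsilon, stepsize, iterations):
    # Iterative L-infinity attack: ascend the loss, project back into the
    # epsilon-ball around the clean image, and keep pixels in [0, 1].
    x_adv = image.clone().detach()
    for _ in range(iterations):
        x_adv = x_adv.detach().requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), label)
        grad = torch.autograd.grad(loss, x_adv)[0]
        with torch.no_grad():
            x_adv = x_adv + stepsize * grad.sign()
            x_adv = image + torch.clamp(x_adv - image, -epsilon, epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    # Return the adversarial image only if it changes the prediction,
    # mirroring the `image_adv is not None` check in the loop above.
    if model(x_adv).argmax(dim=1).item() != label.item():
        return x_adv
    return None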