def main():
    keras.backend.set_image_dim_ordering('th')

    # We can't use argparse in a test because it reads the arguments to nosetests
    # e.g., nosetests -v passes the -v to the test
    args = {
            "batch_size": 128,
            "nb_epochs": 2,
            "learning_rate": .5
            }

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    X_train = X_train[:10000]
    Y_train = Y_train[:10000]
    X_test = X_test[:2000]
    Y_test = Y_test[:2000]

    assert Y_train.shape[1] == 10.
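    # Label smoothing: soften the one-hot targets so the true class gets
    # probability 1 - label_smooth and the other nine classes share label_smooth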
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = cnn_model()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    # Train an MNIST model
    th_model_train(x, y, predictions, model.trainable_weights,
                   X_train, Y_train, args=args)

    accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)

    assert accuracy > 0.8, accuracy
Example #2
def main():
    """
    Test the accuracy of the MNIST cleverhans tutorial model
    :return:
    """

    if not hasattr(backend, "theano"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the Theano backend.")

    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'th':
        keras.backend.set_image_dim_ordering('th')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'tf', temporarily setting to 'th'")

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size',
                        '-b',
                        default=128,
                        help='Size of training batches')
    parser.add_argument('--train_dir',
                        '-d',
                        default='/tmp',
                        help='Directory storing the saved model.')
    parser.add_argument('--filename',
                        '-f',
                        default='mnist.ckpt',
                        help='Filename to save model under.')
    parser.add_argument('--nb_epochs',
                        '-e',
                        default=6,
                        type=int,
                        help='Number of epochs to train model')
    parser.add_argument('--learning_rate',
                        '-lr',
                        default=0.5,
                        type=float,
                        help='Learning rate for training')
    args = parser.parse_args()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST test data.")

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    y_shape = (None, 10)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = model_mnist()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    # Train an MNIST model
    th_model_train(x,
                   y,
                   predictions,
                   model.trainable_weights,
                   X_train,
                   Y_train,
                   args=args)

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
    assert float(accuracy) >= 0.98, accuracy

    # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
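    # eps sets the size of the per-pixel perturbation; larger values give a
    # stronger but more visible attack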
    adv_x = fgsm(x, predictions, eps=0.3, back='th')
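    # batch_eval runs the symbolic adv_x graph over X_test in batches and
    # returns one numpy array per requested output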
    X_test_adv, = batch_eval([x], [adv_x], [X_test], args=args)
    assert X_test_adv.shape[0] == 10000, X_test_adv.shape

    # Evaluate the accuracy of the MNIST model on adversarial examples
    accuracy = th_model_eval(x, y, predictions, X_test_adv, Y_test, args=args)
    assert float(accuracy) <= 0.1, accuracy
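
For reference, the fgsm() call used throughout these examples boils down to a single gradient-sign step in Theano. The sketch below is a minimal approximation rather than the exact cleverhans attacks_th.fgsm implementation; the helper name fgsm_sketch and the [0, 1] clipping range are assumptions.

import theano.tensor as T


def fgsm_sketch(x, predictions, eps=0.3):
    # Use the model's own most likely class as the label, so no ground truth
    # is needed to craft the perturbation
    y = T.eq(predictions, predictions.max(axis=1, keepdims=True))
    y = y / y.sum(axis=1, keepdims=True)
    loss = T.nnet.categorical_crossentropy(predictions, y).mean()
    # Step each pixel by eps in the direction of the sign of the loss gradient
    adv_x = x + eps * T.sgn(T.grad(loss, x))
    # Keep the adversarial examples inside the valid input range
    return T.clip(adv_x, 0., 1.)
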
Example #3
print("Defined Theano model graph.")


def evaluate():
    # Evaluate the accuracy of the CIFAR model on legitimate test examples
    accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
    assert X_test.shape[0] == 10000, X_test.shape
    print('Test accuracy on legitimate test examples: ' + str(accuracy))
    pass


# Train a CIFAR model
th_model_train(x,
               y,
               predictions,
               model.trainable_weights,
               X_train,
               Y_train,
               evaluate=evaluate,
               args=args)
# Craft adversarial examples using Fast Gradient Sign Method (FGSM)
adv_x = fgsm(x, predictions, eps=0.3)
X_test_adv, = batch_eval([x], [adv_x], [X_test], args=args)
assert X_test_adv.shape[0] == 10000, X_test_adv.shape

# Evaluate the accuracy of the CIFAR model on adversarial examples
accuracy = th_model_eval(x, y, predictions, X_test_adv, Y_test, args=args)
print('Test accuracy on adversarial examples: ' + str(accuracy))

print("Repeating the process, using adversarial training")
x_2 = T.tensor4('x_2')
y_2 = T.matrix('y_2')
Example #4
def main():
    """
    Test the accuracy of the MNIST cleverhans tutorial model
    :return:
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size',
                        '-b',
                        default=128,
                        help='Size of training batches')
    parser.add_argument('--train_dir',
                        '-d',
                        default='/tmp',
                        help='Directory storing the saved model.')
    parser.add_argument('--filename',
                        '-f',
                        default='mnist.ckpt',
                        help='Filename to save model under.')
    parser.add_argument('--nb_epochs',
                        '-e',
                        default=6,
                        type=int,
                        help='Number of epochs to train model')
    parser.add_argument('--learning_rate',
                        '-lr',
                        default=0.5,
                        type=float,
                        help='Learning rate for training')
    args = parser.parse_args()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST test data.")

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    y_shape = (None, 10)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = model_mnist()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    # Train an MNIST model
    th_model_train(x,
                   y,
                   predictions,
                   model.trainable_weights,
                   X_train,
                   Y_train,
                   args=args)

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
    assert float(accuracy) >= 0.98, accuracy

    # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
    adv_x = fgsm(x, predictions, eps=0.3, back='th')
    X_test_adv, = batch_eval([x], [adv_x], [X_test], args=args)
    assert X_test_adv.shape[0] == 10000, X_test_adv.shape

    # Evaluate the accuracy of the MNIST model on adversarial examples
    accuracy = th_model_eval(x, y, predictions, X_test_adv, Y_test, args=args)
    assert float(accuracy) <= 0.1, accuracy
Example #5
def main():
    """
    MNIST CleverHans tutorial
    :return:
    """

    if not hasattr(backend, "theano"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the Theano backend.")

    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'th':
        keras.backend.set_image_dim_ordering('th')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'tf', temporarily setting to 'th'")

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-b', default=128,
                        help='Size of training batches')
    parser.add_argument('--train_dir', '-d', default='/tmp',
                        help='Directory storing the saved model.')
    parser.add_argument('--filename', '-f', default='mnist.ckpt',
                        help='Filename to save model under.')
    parser.add_argument('--nb_epochs', '-e', default=6, type=int,
                        help='Number of epochs to train model')
    parser.add_argument('--learning_rate', '-lr', default=0.5, type=float,
                        help='Learning rate for training')
    args = parser.parse_args()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST test data.")

    assert Y_train.shape[1] == 10.
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = cnn_model()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
        assert X_test.shape[0] == 10000, X_test.shape
        print('Test accuracy on legitimate test examples: ' + str(accuracy))
        pass

    # Train an MNIST model
    th_model_train(x, y, predictions, model.trainable_weights, X_train,
                   Y_train, evaluate=evaluate, args=args)

    # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
    fgsm = FastGradientMethod(model, back='th')
    adv_x = fgsm.generate(x, params={'eps': 0.3})

    # Evaluate the accuracy of the MNIST model on adversarial examples
    accuracy = th_model_eval(x, y, model(adv_x), X_test, Y_test, args=args)
    print('Test accuracy on adversarial examples: ' + str(accuracy))

    print("Repeating the process, using adversarial training")
    # Redefine Theano model graph
    model_2 = cnn_model()
    model_2.build(x_shape)
    preds_2 = model_2(x)
    fgsm = FastGradientMethod(model_2, back='th')
    preds_2_adv = model_2(fgsm.generate(x, params={'eps': 0.3}))

    def evaluate_2():
        # Evaluate the accuracy of the adversarially trained MNIST model on
        # legitimate test examples
        accuracy = th_model_eval(x, y, preds_2, X_test, Y_test, args=args)
        print('Test accuracy on legitimate test examples: ' + str(accuracy))

        # Evaluate the accuracy of the adversarially trained MNIST model on
        # adversarial examples
        acc_adv = th_model_eval(x, y, preds_2_adv, X_test, Y_test, args=args)
        print('Test accuracy on adversarial examples: ' + str(acc_adv))

    # Perform adversarial training
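    # (passing predictions_adv makes th_model_train include the loss on the
    # FGSM-perturbed inputs in the training objective)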
    th_model_train(x, y, preds_2, model_2.trainable_weights, X_train, Y_train,
                   predictions_adv=preds_2_adv, evaluate=evaluate_2, args=args)
Example #6
def main():
    """
    MNIST cleverhans tutorial
    :return:
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-b', default=1000, type=int, help='Size of training batches')
    parser.add_argument('--train_dir', '-d', default='/tmp', help='Directory storing the saved model.')
    parser.add_argument('--filename', '-f', default='mnist.ckpt', help='Filename to save model under.')
    parser.add_argument('--nb_epochs', '-e', default=6, type=int, help='Number of epochs to train model')
    parser.add_argument('--nb_iters', '-i', default=10000, type=int, help='Number of iterations for crafting adversarial examples')
    parser.add_argument('--learning_rate', '-lr', default=0.1, type=float, help='Learning rate for training')
    parser.add_argument('--eps', default=0.01, type=float, help='Epsilon for Carlini L2 Attack')
    parser.add_argument('--kappa', default=0.01, type=float, help='Kappa for Carlini L2 Attack')
    parser.add_argument('--c', default=20, type=float)
    parser.add_argument('--load', default=None, type=str, help='Model path to load')
    parser.add_argument('--dump', default=None, type=str, help='Model path to dump')
    args = parser.parse_args()

    np.random.seed(126)
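    # Seed numpy's RNG so that runs are reproducible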

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST test data.")

    assert Y_train.shape[1] == 10.
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    y_shape = (None, 10)
    x = T.tensor4('x')
    y = T.matrix('y')
    
    if args.load:
        # Load a previously trained model instead of training from scratch
        with open(args.load, "rb") as f:
            model = pickle.load(f)
        predictions = model(x)
    else:
        # Define Theano model graph
        model = model_mnist()
        model.build(x_shape)
        predictions = model(x)
        print("Defined Theano model graph.")

        def evaluate():
            # Evaluate the accuracy of the MNIST model on legitimate test examples
            accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
            assert X_test.shape[0] == 10000, X_test.shape
            print('Test accuracy on legitimate test examples: ' + str(accuracy))
            pass

        # Train an MNIST model
        th_model_train(x, y, predictions, model.trainable_weights, X_train, Y_train, evaluate=evaluate, args=args)

        if args.dump:
            # Save the trained model for later reuse
            with open(args.dump, "wb") as f:
                pickle.dump(model, f)

    # Craft adversarial examples using the Carlini & Wagner L2 attack
    # (in the standard C&W formulation, kappa is the confidence margin and c
    # weights the classification loss against the distortion term)
    carlini_L2(x, predictions, X_test, Y_test, eps=args.eps, kappa=args.kappa,
               c=args.c, nb_iters=args.nb_iters, batch_size=args.batch_size)
Example #7
def main():
    """
    MNIST cleverhans tutorial
    :return:
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-b', default=128, help='Size of training batches')
    parser.add_argument('--train_dir', '-d', default='/tmp', help='Directory storing the saved model.')
    parser.add_argument('--filename', '-f', default='mnist.ckpt', help='Filename to save model under.')
    parser.add_argument('--nb_epochs', '-e', default=6, type=int, help='Number of epochs to train model')
    parser.add_argument('--learning_rate', '-lr', default=0.5, type=float, help='Learning rate for training')
    args = parser.parse_args()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST test data.")

    assert Y_train.shape[1] == 10.
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    y_shape = (None, 10)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = model_mnist()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
        assert X_test.shape[0] == 10000, X_test.shape
        print('Test accuracy on legitimate test examples: ' + str(accuracy))
        pass

    # Train an MNIST model
    th_model_train(x, y, predictions, model.trainable_weights, X_train, Y_train, evaluate=evaluate, args=args)


    # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
    adv_x = fgsm(x, predictions, eps=0.3)
    X_test_adv, = batch_eval([x], [adv_x], [X_test], args=args)
    assert X_test_adv.shape[0] == 10000, X_test_adv.shape

    # Evaluate the accuracy of the MNIST model on adversarial examples
    accuracy = th_model_eval(x, y, predictions, X_test_adv, Y_test, args=args)
    print('Test accuracy on adversarial examples: ' + str(accuracy))

    print("Repeating the process, using adversarial training")
    # Redefine Theano model graph
    model_2 = model_mnist()
    model_2.build(x_shape)
    predictions_2 = model_2(x)
    adv_x_2 = fgsm(x, predictions_2, eps=0.3)
    predictions_2_adv = model_2(adv_x_2)


    def evaluate_2():
        # Evaluate the accuracy of the adversarially trained MNIST model on
        # legitimate test examples
        accuracy = th_model_eval(x, y, predictions_2, X_test, Y_test, args=args)
        print('Test accuracy on legitimate test examples: ' + str(accuracy))

        # Evaluate the accuracy of the adversarially trained MNIST model on
        # adversarial examples
        accuracy_adv = th_model_eval(x, y, predictions_2_adv, X_test, Y_test, args=args)
        print('Test accuracy on adversarial examples: ' + str(accuracy_adv))

    # Perform adversarial training
    th_model_train(x, y, predictions_2, model_2.trainable_weights, X_train,
                   Y_train, predictions_adv=predictions_2_adv,
                   evaluate=evaluate_2, args=args)