Example #1
    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              predictions,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        # accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_wrm,
        #                                X_test, Y_test, args=eval_params)
        # print('Test accuracy on Wasserstein examples: %0.4f' % accuracy_adv_wass)

        # Accuracy of the model on FGSM adversarial examples
        accuracy_adv_fgsm = model_eval(sess, x, y, preds_adv_fgsm,
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on FGSM examples: %0.4f' % accuracy_adv_fgsm)

        # Accuracy of the model on IFGM adversarial examples
        accuracy_adv_ifgm = model_eval(sess, x, y, preds_adv_ifgm,
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on IFGM examples: %0.4f' % accuracy_adv_ifgm)
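These evaluate() closures depend on names defined in the enclosing script (sess, x, y, the prediction tensors, X_test, Y_test, and eval_params). In CleverHans v2-style scripts they are passed as the evaluate callback to model_train from cleverhans.utils_tf, which invokes them after each training epoch. A minimal sketch of that wiring, assuming the surrounding MNIST setup (train_params and X_train/Y_train are assumed names, not part of the snippet above):

    from cleverhans.utils_tf import model_train

    # Hypothetical training-loop wiring: evaluate() is called after every epoch
    # to report clean and adversarial test accuracy as training progresses.
    train_params = {'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 0.001}
    model_train(sess, x, y, predictions, X_train, Y_train,
                evaluate=evaluate, args=train_params)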
Example #2
    def evaluate_adv():
        # Accuracy of the adversarially trained model on legitimate test inputs
        accuracy = model_eval(sess, x, y, predictions_adv, X_test, Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_adv_wrm,
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)
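For the adversarially trained variant, the same model_train utility also accepts a predictions_adv keyword so the training loss includes adversarial examples, with evaluate_adv() monitoring both metrics per epoch. A sketch under the same assumptions as above (here predictions_adv is the adversarially trained model's output on clean inputs, as in the snippet, and predictions_adv_adv_wrm its output on Wasserstein adversarial inputs):

    # Hypothetical adversarial-training wiring: the model is trained on a mix
    # of clean and Wasserstein adversarial examples; evaluate_adv() reports
    # both accuracies after each epoch.
    model_train(sess, x, y, predictions_adv, X_train, Y_train,
                predictions_adv=predictions_adv_adv_wrm,
                evaluate=evaluate_adv, args=train_params)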
Example #3
    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess, x, y, predictions, X_test, Y_test, args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_wrm,
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)
Example #4
    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess, x, y, predictions, X_test, Y_test, args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_eval, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)
        # Append the clean and adversarial accuracies to a CSV log
        # (assumes `import csv` at module level; `file` is a path defined
        # in the enclosing script). A context manager ensures the handle
        # is closed after each write.
        with open(file, 'a') as f:
            f_writer = csv.writer(f)
            f_writer.writerow((accuracy, accuracy_adv_wass))
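Because the file is opened in append mode, each epoch adds one row; writing a header once before training starts keeps the log self-describing. A hypothetical one-time setup (the column names are illustrative, not from the original script):

    import csv

    # Hypothetical header write, done once before training begins
    with open(file, 'w') as f:
        csv.writer(f).writerow(('test_accuracy', 'wasserstein_accuracy'))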