Code example #1
# Imports assumed from the original cleverhans (v1, Theano backend) tutorial:
import argparse

import keras
from keras import backend
import theano.tensor as T

from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_keras import cnn_model
from cleverhans.utils_th import th_model_train, th_model_eval, batch_eval
from cleverhans.attacks_th import fgsm


def main():
    """
    MNIST cleverhans tutorial
    :return:
    """

    if not hasattr(backend, "theano"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the Theano backend.")

    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'th':
        keras.backend.set_image_dim_ordering('th')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to 'tf', temporarily setting to 'th'")

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-b', default=128, type=int, help='Size of training batches')
    parser.add_argument('--train_dir', '-d', default='/tmp', help='Directory storing the saved model.')
    parser.add_argument('--filename', '-f',  default='mnist.ckpt', help='Filename to save model under.')
    parser.add_argument('--nb_epochs', '-e', default=6, type=int, help='Number of epochs to train model')
    parser.add_argument('--learning_rate', '-lr', default=0.5, type=float, help='Learning rate for training')
    args = parser.parse_args()

    # Get MNIST training and test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST data.")

    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
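    # E.g. each one-hot row [0, ..., 1, ..., 0] becomes [0.0111..., 0.9, ...]:
    # the true class keeps 1 - 0.1 = 0.9, the other nine classes get 0.1/9 each.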

    # Define input Theano placeholder
    x_shape = (None, 1, 28, 28)
    y_shape = (None, 10)
    x = T.tensor4('x')
    y = T.matrix('y')

    # Define Theano model graph
    model = cnn_model()
    model.build(x_shape)
    predictions = model(x)
    print("Defined Theano model graph.")

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = th_model_eval(x, y, predictions, X_test, Y_test, args=args)
        assert X_test.shape[0] == 10000, X_test.shape
        print('Test accuracy on legitimate test examples: ' + str(accuracy))

    # Train an MNIST model
    th_model_train(x, y, predictions, model.trainable_weights, X_train, Y_train,
                   evaluate=evaluate, args=args)


    # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
    adv_x = fgsm(x, predictions, eps=0.3)
    X_test_adv, = batch_eval([x], [adv_x], [X_test], args=args)

    assert X_test_adv.shape[0] == 10000, X_test_adv.shape
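
The excerpt stops right after crafting the adversarial test set. A natural follow-up, mirroring the evaluate() helper above (a sketch added here, not part of the original excerpt):

    # Evaluate the accuracy of the MNIST model on the adversarial examples
    accuracy_adv = th_model_eval(x, y, predictions, X_test_adv, Y_test, args=args)
    print('Test accuracy on adversarial examples: ' + str(accuracy_adv))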
Code example #2
def main(argv=None):

    keras.layers.core.K.set_learning_phase(1)
    manual_variable_initialization(True)

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get MNIST training and test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # WRM parameters
    wrm_params = {'eps': 1.3, 'ord': 2, 'y': y, 'steps': 1}
    wrm_eval_params = {'eps': 1.3, 'ord': 2, 'y': y, 'steps': 15}
    # The number of batches per epoch, needed by the new WRM model
    nb_batches = int(math.ceil(float(len(X_train)) / FLAGS.batch_size))

    # Define TF model graph
    model = cnn_model(activation='elu')
    predictions = model(x)
    wrm = WassersteinRobustMethod(model, X_train, nb_batches, sess=sess)
    predictions_adv_wrm = model(wrm.generate(x, **wrm_params))
    predictions_adv_eval = model(wrm.generate(x, **wrm_eval_params))

    # `file` is assumed to be a CSV log path defined at module level
    f = open(file, 'a')
    f_writer = csv.writer(f)
    f_writer.writerow(['--- base version ---'])

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess, x, y, predictions, X_test, Y_test, args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_eval, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)
        f = open(file, 'a')
        f_writer = csv.writer(f)
        f_writer.writerow((accuracy, accuracy_adv_wass))

    # Train the model
    model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
                args=train_params, save=False)
    model.model.save(FLAGS.train_dir + '/' + FLAGS.filename_erm)

    print('')
    print("Repeating the process, using Wasserstein adversarial training")

    f = open(file, 'a')
    f_writer = csv.writer(f)
    f_writer.writerow(['--- robust version ---'])

    # Redefine TF model graph
    model_adv = cnn_model(activation='elu')
    predictions_adv = model_adv(x)
    wrm2 = WassersteinRobustMethod(model_adv, sess=sess)
    predictions_adv_adv_wrm = model_adv(wrm2.generate(x, **wrm_params))
    predictions_adv_eval = model_adv(wrm2.generate(x, **wrm_eval_params))

    def evaluate_adv():
        # Accuracy of adversarially trained model on legitimate test inputs
        accuracy = model_eval(sess, x, y, predictions_adv, X_test, Y_test, args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_eval,
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)
        f = open(file, 'a')
        f_writer = csv.writer(f)
        f_writer.writerow((accuracy, accuracy_adv_wass))

    # Train the robust model (renamed to avoid shadowing the model_train
    # function used above; RobustTraining is assumed to wrap model_adv)
    robust_trainer = RobustTraining(sess, model_adv, X_train, Y_train)
    robust_trainer.train(x, y, predictions_adv_adv_wrm,
                         predictions_adv=predictions_adv_adv_wrm,
                         evaluate=evaluate_adv, args=train_params, save=False)
    model_adv.model.save(FLAGS.train_dir + '/' + FLAGS.filename_wrm)
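
The snippet relies on `file`, `eval_params`, and `train_params` without defining them. A minimal sketch of plausible definitions, following the conventions of the cleverhans MNIST tutorials (assumed values, not the project's actual configuration):

file = 'wrm_results.csv'  # CSV log written to by evaluate()/evaluate_adv()
eval_params = {'batch_size': 128}
train_params = {'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 0.001}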
Code example #3
    trainset_loader = DataLoader(trainset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=2)
    print('\n----- Load Val Set-------')
    valset = VideoDatasetArray(root=data_folder_val,
                               n_frames=n_frames,
                               transform=transform,
                               train=False)
    valset_loader = DataLoader(valset,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=2)

    # Train model
    conv_model = cnn_model()
    model = Attention(conv_model,
                      n_frames=n_frames,
                      hidden_size=100,
                      maxpool=True)
    model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
    finetune = FineTune(model,
                        'attention',
                        epoch=epoch,
                        batch_size=batch_size,
                        optimizer=optimizer,
                        filename=log_name,
                        trainset_loader=trainset_loader,
                        valset_loader=valset_loader)
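
`FineTune` is project-specific code whose internals the excerpt does not show. A minimal sketch of the per-epoch loop such a helper typically wraps; the names and loss are assumptions, not the class's actual API:

import torch.nn.functional as F

def train_one_epoch(model, trainset_loader, optimizer):
    # One pass over the training videos: forward, cross-entropy loss, backprop
    model.train()
    for frames, labels in trainset_loader:
        frames, labels = frames.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = F.cross_entropy(model(frames), labels)
        loss.backward()
        optimizer.step()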
Code example #4
def main(argv=None):

    keras.layers.core.K.set_learning_phase(1)
    manual_variable_initialization(True)

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get MNIST training and test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # Define TF model graph
    model = cnn_model(activation='elu')
    predictions = model(x)
    wrm = WassersteinRobustMethod(model, sess=sess)
    wrm_params = {'eps': 1.3, 'ord': 2, 'y': y, 'steps': 15}
    predictions_adv_wrm = model(wrm.generate(x, **wrm_params))

    fgsm = FastGradientMethod(model, sess=sess)
    fgsm_params = {'eps': 0.1, 'clip_min': 0., 'clip_max': 1.}
    adv_fgsm = fgsm.generate(x, **fgsm_params)
    adv_fgsm = tf.stop_gradient(adv_fgsm)
    preds_adv_fgsm = model(adv_fgsm)

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              predictions,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        # accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_wrm, X_test, \
        #                                Y_test, args=eval_params)
        # print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)

        # Accuracy of the model on FGSM adversarial examples
        accuracy_adv_fgsm = model_eval(sess, x, y, preds_adv_fgsm, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on fgsm examples: %0.4f\n' % accuracy_adv_fgsm)

    # Train the model
    model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
                args=train_params, save=False)
    model.model.save(FLAGS.train_dir + '/' + FLAGS.filename_erm)

    print('')
    print("Repeating the process, using Wasserstein adversarial training")
    # Redefine TF model graph
    model_adv = cnn_model(activation='elu')
    predictions_adv = model_adv(x)
    wrm2 = WassersteinRobustMethod(model_adv, sess=sess)
    wrm_params = {'eps': 1.3, 'ord': 2, 'y': y, 'steps': 15}
    predictions_adv_adv_wrm = model_adv(wrm2.generate(x, **wrm_params))

    fgsm = FastGradientMethod(model_adv, sess=sess)
    fgsm_params = {'eps': 0.2, 'clip_min': 0., 'clip_max': 1.}
    adv_fgsm = fgsm.generate(x, **fgsm_params)
    adv_fgsm = tf.stop_gradient(adv_fgsm)
    preds_adv_fgsm = model_adv(adv_fgsm)

    def evaluate_adv():
        # Accuracy of adversarially trained model on legitimate test inputs
        accuracy = model_eval(sess,
                              x,
                              y,
                              predictions_adv,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on Wasserstein adversarial examples
        # accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_adv_wrm, \
        #                                X_test, Y_test, args=eval_params)
        # print('Test accuracy on Wasserstein examples: %0.4f\n' % accuracy_adv_wass)

        # Accuracy of the adversarially trained model on FGSM adversarial examples
        accuracy_adv_fgsm = model_eval(sess, x, y, preds_adv_fgsm, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on fgsm examples: %0.4f\n' % accuracy_adv_fgsm)

    model_train(sess, x, y, predictions_adv_adv_wrm, X_train, Y_train,
                predictions_adv=predictions_adv_adv_wrm, evaluate=evaluate_adv,
                args=train_params, save=False)
    model_adv.model.save(FLAGS.train_dir + '/' + FLAGS.filename_wrm)
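
For reference, the single-step rule that FastGradientMethod implements can be written directly in TF1. A sketch under the assumption of a softmax cross-entropy loss (not the cleverhans source):

import tensorflow as tf

def fgsm_sketch(x, logits, y, eps=0.1, clip_min=0., clip_max=1.):
    # x_adv = clip(x + eps * sign(dL/dx)): the L-infinity fast gradient step
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
    grad, = tf.gradients(loss, x)
    return tf.clip_by_value(x + eps * tf.sign(grad), clip_min, clip_max)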
Code example #5
    b) Set `th_dim_model` = ... (create your th dim model here and set it to th_dim_model)
    c) Set `tf_dim_model` = ... (create your tf dim model here and set it to tf_dim_model)
    d) Add the path to the weight files in `model_weights`.
       Note : The weight files must be for the Theano model (theano kernels, th dim ordering)

3) Run the script.

4) Use the weight files in the created folders : ["tf-kernels-tf-dim/", "tf-kernels-th-dim/", "th-kernels-tf-dim/"]
'''

K.set_image_dim_ordering('th')
th_dim_model = mnist_network.build_model()  # Create your Theano model here with TH dim ordering

K.set_image_dim_ordering('tf')
tf_dim_model = utils.cnn_model()  # Create your TensorFlow model with TF dim ordering here

# model_weights = ['mnist.h5']
# Add the Theano-model weight file paths here. These weights are assumed to be
# for the Theano backend (th kernels) with th dim ordering!
model_weights = ['mnist_retrained_pixelSets_5526_20_L1_0.03.h5']
"""

No need to edit anything below this. Simply run the script now after
editing the above 3 inputs.

"""


def shuffle_rows(original_w, nb_last_conv, nb_rows_dense):
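    # The excerpt ends at this signature. One plausible vectorized body, on the
    # assumption that the function regroups the first Dense kernel's rows so a
    # channels-major flattening lines up with a spatial-major one (a sketch,
    # not the script's actual implementation):
    import numpy as np
    w = np.asarray(original_w).reshape((nb_rows_dense, nb_last_conv, -1))
    return w.transpose((1, 0, 2)).reshape((nb_last_conv * nb_rows_dense, -1))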
Code example #6
def main(argv=None):
    """
    MNIST tutorial for the Jacobian-based saliency map approach (JSMA)
    :return:
    """

    os.environ['KERAS_BACKEND'] = 'tensorflow'

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    
    fileName = "statistics/JAMA_dataCollection_%s.txt"%(FLAGS.round)
    fileHandler = open(fileName, 'a')

    ###########################################################################
    # Define the dataset and model
    ###########################################################################

    # Image dimension ordering should follow the TensorFlow convention
    if K.image_dim_ordering() != 'tf':
        K.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
              "to 'th', temporarily setting to 'tf'")

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    K.set_session(sess)
    print("Created TensorFlow session and set Keras backend.")

    # Get MNIST training and test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    print("Loaded MNIST data.")

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # Define TF model graph
    model = cnn_model()
    
    
    first_dense = True

    if FLAGS.round == 1:
        weight_fn = 'tf-kernels-tf-dim-ordering/mnist.h5'
    else:
        weight_fn = 'tf-kernels-tf-dim-ordering/mnist_retrained_pixelSets_5526_20_L1_0.03.h5'
    model.load_weights(weight_fn)  # tf-kernels-tf-dim
    convert_all_kernels_in_model(model)  # th-kernels-tf-dim

    count_dense = 0
    for layer in model.layers:
        if layer.__class__.__name__ == "Dense":
            count_dense += 1

    if count_dense == 1:
        first_dense = False # If there is only 1 dense, no need to perform row shuffle in Dense layer

    print("Nb layers : ", len(model.layers))

    for index, tf_layer in enumerate(model.layers):
        if tf_layer.__class__.__name__ in ['Convolution1D',
                                           'Convolution2D',
                                           'Convolution3D',
                                           'AtrousConvolution2D',
                                           'Deconvolution2D']:
            weights = tf_layer.get_weights() # th-kernels-tf-dim
            model.layers[index].set_weights(weights) # th-kernels-tf-dim

            nb_last_conv = tf_layer.nb_filter # preserve last number of convolutions to use with dense layers
            print("Converted layer %d : %s" % (index + 1, tf_layer.name))
        else:
            if tf_layer.__class__.__name__ == "Dense" and first_dense:
                weights = tf_layer.get_weights()
                nb_rows_dense_layer = weights[0].shape[0] // nb_last_conv

                print("Magic Number 1 : ", nb_last_conv)
                print("Magic nunber 2 : ", nb_rows_dense_layer)

                model.layers[index].set_weights(weights)

                first_dense = False
                print("Shuffled Dense Weights layer and saved %d : %s" % (index + 1, tf_layer.name))
            else:
                model.layers[index].set_weights(tf_layer.get_weights())
                print("Saved layer %d : %s" % (index + 1, tf_layer.name))
    
    predictions = model(x)
    print("Defined TensorFlow model graph.")
    
    
    #filename = "pic/%s.jpg"%(FLAGS.starting_index)
    #testImage = np.squeeze(X_test[(FLAGS.starting_index):(FLAGS.starting_index+1)][0])
    #print("%s--%s"%(str(np.amax(testImage)), str(np.amin(testImage))))
    #save(0,testImage,filename)
    

    ###########################################################################
    # Training the model using TensorFlow
    ###########################################################################

    '''

    # Train an MNIST model if it does not exist in the train_dir folder
    saver = tf.train.Saver()
    save_path = os.path.join(FLAGS.train_dir, FLAGS.filename)
    if os.path.isfile(save_path):
        saver.restore(sess, os.path.join(FLAGS.train_dir, FLAGS.filename))
    else:
        train_params = {
            'nb_epochs': FLAGS.nb_epochs,
            'batch_size': FLAGS.batch_size,
            'learning_rate': FLAGS.learning_rate
        }
        model_train(sess, x, y, predictions, X_train, Y_train,
                    args=train_params)
        saver.save(sess, save_path)

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    eval_params = {'batch_size': FLAGS.batch_size}
    accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
                          args=eval_params)
    assert X_test.shape[0] == 10000, X_test.shape
    print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
    
    '''


    ###########################################################################
    # Craft adversarial examples using the Jacobian-based saliency map approach
    ###########################################################################
    print('Crafting ' + str(FLAGS.source_samples) + ' * ' +
          str(FLAGS.nb_classes-1) + ' adversarial examples')

    # This array indicates whether an adversarial example was found for each
    # test set sample and target class
    results = np.zeros((FLAGS.nb_classes, FLAGS.source_samples), dtype='i')

    # This array contains the fraction of perturbed features for each test set
    # sample and target class
    perturbations = np.zeros((FLAGS.nb_classes, FLAGS.source_samples),
                             dtype='f')

    # Define the TF graph for the model's Jacobian
    grads = jacobian_graph(predictions, x, FLAGS.nb_classes)
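    # jsma() below greedily perturbs the pixel pairs with the highest saliency:
    # pixels whose gradient toward the target class is positive while their
    # summed gradient toward every other class is negative (Papernot et al., 2016).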

    # Initialize our array for grid visualization
    grid_shape = (FLAGS.nb_classes,
                  FLAGS.nb_classes,
                  FLAGS.img_rows,
                  FLAGS.img_cols,
                  FLAGS.nb_channels)
    grid_viz_data = np.zeros(grid_shape, dtype='f')
    
    eud = {}
    l1d = {}
    succ = {}

    # Loop over the samples we want to perturb into adversarial examples
    for sample_ind in range(0, FLAGS.source_samples):
        # We want to find an adversarial example for each possible target class
        # (i.e. all classes that differ from the label given in the dataset)
        current_class = int(np.argmax(Y_test[FLAGS.starting_index + sample_ind]))
        target_classes = other_classes(FLAGS.nb_classes, current_class)
        
        print('working with image id: %s\n' % (FLAGS.starting_index + sample_ind))
        filename = "pic/%s_jsma.jpg" % (FLAGS.starting_index + sample_ind)
        testImage = np.squeeze(X_test[(FLAGS.starting_index + sample_ind):
                                      (FLAGS.starting_index + sample_ind + 1)][0])
        save(0, testImage, filename)

        # For the grid visualization, keep original images along the diagonal
        #grid_viz_data[current_class, current_class, :, :, :] = np.reshape(
        #        X_test[sample_ind:(sample_ind+1)],
        #        (FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels))
                
        # initialise data collection
        eud[sample_ind] = 1000.0
        l1d[sample_ind] = 1000.0
        succ[sample_ind] = 0

        # Loop over all target classes
        for target in target_classes:
            print('--------------------------------------')
            print('Creating adv. example for target class ' + str(target))

            # This call runs the Jacobian-based saliency map approach
            adv_x, res, percent_perturb = jsma(sess, x, predictions, grads,
                                               X_test[(FLAGS.starting_index+sample_ind):
                                                      (FLAGS.starting_index+sample_ind+1)],
                                               target, theta=FLAGS.thetaValue, gamma=0.05,
                                               increase=True, back='tf',
                                               clip_min=0, clip_max=1)
                                               
            #print(np.max(adv_x))

            # Display the original and adversarial images side-by-side
            #if FLAGS.viz_enabled:
            #    if 'figure' not in vars():
            #            figure = pair_visual(
            #                    np.reshape(X_test[(FLAGS.starting_index+sample_ind):(FLAGS.starting_index+sample_ind+1)],
            #                               (FLAGS.img_rows, FLAGS.img_cols)),
            #                    np.reshape(adv_x,
            #                               (FLAGS.img_rows, FLAGS.img_cols)))
            #    else:
            #        figure = pair_visual(
            #                np.reshape(X_test[(FLAGS.starting_index+sample_ind):(FLAGS.starting_index+sample_ind+1)],
            #                           (FLAGS.img_rows, FLAGS.img_cols)),
            #                np.reshape(adv_x, (FLAGS.img_rows,
            #                           FLAGS.img_cols)), figure)

            # Add our adversarial example to our grid data
            #grid_viz_data[target, current_class, :, :, :] = np.reshape(
            #        adv_x, (FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels))
                    
            filename = "pic/%s_jsma_%s_%s.jpg"%(FLAGS.starting_index+sample_ind,FLAGS.thetaValue,target)                        
            testImage1 = np.squeeze(adv_x[0])
            fileHandler.write("\nimage id: %s\n"%(FLAGS.starting_index+sample_ind))
            fileHandler.write("theta value: %s\n"%(FLAGS.thetaValue))
            fileHandler.write("target: %s\n"%(target))
            fileHandler.write("euclidean distance: %s\n"%(euclideanDistance(testImage1,testImage))) 
            fileHandler.write("L1 distance: %s\n"%(l1Distance(testImage1,testImage)))
            save(0,testImage1,filename)


            # Update the arrays for later analysis
            results[target, sample_ind] = res
            perturbations[target, sample_ind] = percent_perturb
            
            # collect data 
            temp_x = X_test[FLAGS.starting_index+sample_ind]
            adv_x = adv_x[0]
            temp_eud = euclideanDistance(temp_x,adv_x)
            if eud[sample_ind] > temp_eud: 
                eud[sample_ind] = temp_eud
            temp_l1d = l1Distance(temp_x,adv_x)
            if l1d[sample_ind] > temp_l1d: 
                l1d[sample_ind] = temp_l1d  
            if succ[sample_ind] == 0: 
                succ[sample_ind] = res    
                
            #print("res=%s"%(res)) 

    # Compute the number of adversarial examples that were successfully found
    nb_targets_tried = ((FLAGS.nb_classes - 1) * FLAGS.source_samples)
    succ_rate = float(np.sum(results)) / nb_targets_tried
    print('Avg. rate of successful adv. examples {0:.2f}'.format(succ_rate))

    # Compute the average distortion introduced by the algorithm
    percent_perturbed = np.mean(perturbations)
    print('Avg. rate of perturbed features {0:.2f}'.format(percent_perturbed))

    # Compute the average distortion introduced for successful samples only
    percent_perturb_succ = np.mean(perturbations * (results == 1))
    print('Avg. rate of perturbed features for successful '
          'adversarial examples {0:.2f}'.format(percent_perturb_succ))
          
    # Zero out distances for samples where no adversarial example was found,
    # then report averages over all samples
    for e in eud.keys():
        eud[e] = eud[e] * succ[e]
    for e in l1d.keys():
        l1d[e] = l1d[e] * succ[e]
    print("Average Euclidean distance is %s"%(sum(eud.values()) / float(len(eud))))
    print("Average L1 distance is %s"%(sum(l1d.values()) / float(len(l1d))))
    print("Success rate is %s"%(sum(succ.values()) / float(len(succ))))
    

    fileHandler.write("Average Euclidean distance is %s\n"%(sum(eud.values()) / float(len(eud))))
    fileHandler.write("Average L1 distance is %s\n"%(sum(l1d.values()) / float(len(l1d))))
    fileHandler.write("Success rate is %s\n"%(sum(succ.values()) / float(len(succ))))
    fileHandler.close()
    
    # Close TF session
    sess.close()
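
The distance helpers used above are not defined in the excerpt. Minimal sketches with the assumed semantics:

import numpy as np

def euclideanDistance(image1, image2):
    # L2 (Euclidean) distance between two images
    return float(np.sqrt(np.sum((image1 - image2) ** 2)))

def l1Distance(image1, image2):
    # L1 (Manhattan) distance between two images
    return float(np.sum(np.abs(image1 - image2)))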
Code example #7
File: main.py (project: vmos1/cnn_supernova)
    test_data = dataset('test', data_dict, test_idx)
    del data_dict

    print("\nData shapes: Train {0}, Validation {1}, Test {2}\n".format(
        train_data.x.shape, val_data.x.shape, test_data.x.shape))

    t2 = time.time()
    print("Time taken to read and process input files", t2 - t1)

    #### ML part ####

    for i in model_lst:
        model_name = str(i)

        ### Define Object for cnn_model
        Model = cnn_model(model_name, model_save_dir)

        ### Define the keras ML model and store in the object
        model = f_define_model(config_dict, name=model_name)
        Model.f_build_model(model)

        if args.train:  # If model hasn't been trained, train and save files
            ### Train model ###
            Model.f_train_model(train_data,
                                val_data,
                                num_epochs=num_epochs,
                                batch_size=batch_size)

            ### Save model and history ###
            Model.f_save_model_history()
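
The excerpt covers only the training branch. A minimal test-time check, assuming `model` is the compiled Keras model from f_define_model, that it was compiled with an accuracy metric, and that `test_data` exposes .x/.y arrays as the shape printout above suggests (a sketch, not the project's evaluation code):

        # Evaluate the trained model on the held-out test split
        test_loss, test_acc = model.evaluate(test_data.x, test_data.y,
                                             batch_size=batch_size, verbose=0)
        print("Model %s: test loss %.4f, accuracy %.4f" % (model_name, test_loss, test_acc))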
Code example #8
def main(argv=None):

    keras.layers.core.K.set_learning_phase(1)
    manual_variable_initialization(True)

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get data: toy samples or MNIST (the MNIST loader is used here)
    # X_train, Y_train, X_test, Y_test = toysamples()
    # X_train, Y_train, X_test, Y_test = data_mnist_flat()
    X_train, Y_train, X_test, Y_test = data_mnist()

    # Define input TF placeholder
    # x = tf.placeholder(tf.float32, shape=(None, 784))
    # y = tf.placeholder(tf.float32, shape=(None, 10))
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    # Define TF model graph (no adversarial training)

    # model = cnn_model_keras(activation='elu')
    # model = dense_model_keras(activation='elu')
    predictions = cnn_model(x)

    # Attackers: WRM---FGSM---IFGM
    wrm = WassersteinRobustMethod(cnn_model, sess=sess)
    wrm_params = {'eps': 1.3, 'ord': 2, 'y': y, 'steps': 15}
    predictions_adv_wrm = cnn_model(wrm.generate(x, **wrm_params))

    fgsm = FastGradientMethod(cnn_model, sess=sess)
    fgsm_params = {'eps': 0.1, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    adv_fgsm = fgsm.generate(x, **fgsm_params)
    adv_fgsm = tf.stop_gradient(adv_fgsm)
    preds_adv_fgsm = cnn_model(adv_fgsm)

    ifgm = BasicIterativeMethod(cnn_model, sess=sess)
    ifgm_params = {
        'eps': 0.1,
        'ord': np.inf,
        'eps_iter': 0.02,
        'nb_iter': 10,
        'clip_min': 0.,
        'clip_max': 1.
    }
    adv_ifgm = ifgm.generate(x, **ifgm_params)
    adv_ifgm = tf.stop_gradient(adv_ifgm)
    preds_adv_ifgm = cnn_model(adv_ifgm)

    pgm = MadryEtAl(cnn_model, sess=sess)
    pgm_params = {
        'eps': 0.1,
        'ord': np.inf,
        'eps_iter': 0.01,
        'nb_iter': 30,
        'clip_min': 0.,
        'clip_max': 1.
    }
    adv_pgm = pgm.generate(x, **pgm_params)
    adv_pgm = tf.stop_gradient(adv_pgm)
    preds_adv_pgm = cnn_model(adv_pgm)

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        accuracy = model_eval(sess,
                              x,
                              y,
                              predictions,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the model on Wasserstein adversarial examples
        # accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_wrm, X_test, \
        #                                Y_test, args=eval_params)
        # print('Test accuracy on Wasserstein examples: %0.4f' % accuracy_adv_wass)

        # Accuracy of the model on FGSM adversarial examples
        accuracy_adv_fgsm = model_eval(sess, x, y, preds_adv_fgsm, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on fgsm examples: %0.4f' % accuracy_adv_fgsm)

        # Accuracy of the model on IFGM adversarial examples
        accuracy_adv_ifgm = model_eval(sess, x, y, preds_adv_ifgm, X_test,
                                       Y_test, args=eval_params)
        print('Test accuracy on ifgm examples: %0.4f' % accuracy_adv_ifgm)

        # Accuracy of the model on PGM adversarial examples
        # accuracy_adv_pgm = model_eval(sess, x, y, preds_adv_pgm, X_test, \
        #                                Y_test, args=eval_params)
        # print('Test accuracy on pgm examples: %0.4f\n' % accuracy_adv_pgm)

    # Train the model
    # model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
    #             args=train_params, save=False)
    model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
                regulizer=True, regcons=0.5, model=cnn_model,
                lossregfunc=True, args=train_params, save=False)
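
For reference, the projected iterative step that BasicIterativeMethod's parameters describe can be sketched in numpy; `grad_fn` is a hypothetical callable returning dL/dx:

import numpy as np

def ifgm_sketch(x, grad_fn, eps=0.1, eps_iter=0.02, nb_iter=10,
                clip_min=0., clip_max=1.):
    # Repeated signed-gradient ascent, projected back into the eps-ball
    x_adv = x.copy()
    for _ in range(nb_iter):
        x_adv = x_adv + eps_iter * np.sign(grad_fn(x_adv))
        x_adv = np.clip(x_adv, x - eps, x + eps)    # L-infinity projection
        x_adv = np.clip(x_adv, clip_min, clip_max)  # valid pixel range
    return x_adv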