Example 1
    def test_tfclassifier(self):
        """
        First test with the TensorFlowClassifier.
        :return:
        """
        # Build the victim TensorFlowClassifier
        victim_tfc, sess = get_classifier_tf()

        # Define input and output placeholders
        input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        output_ph = tf.placeholder(tf.int32, shape=[None, 10])

        # Define the TensorFlow graph
        conv = tf.layers.conv2d(input_ph, 1, 7, activation=tf.nn.relu)
        conv = tf.layers.max_pooling2d(conv, 4, 4)
        flattened = tf.layers.flatten(conv)

        # Logits layer
        logits = tf.layers.dense(flattened, 10)

        # Train operator
        loss = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(logits=logits,
                                            onehot_labels=output_ph))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train = optimizer.minimize(loss)

        # TensorFlow session and initialization
        sess.run(tf.global_variables_initializer())

        # Create the thieved classifier
        thieved_tfc = TensorFlowClassifier(clip_values=(0, 1),
                                           input_ph=input_ph,
                                           output=logits,
                                           labels_ph=output_ph,
                                           train=train,
                                           loss=loss,
                                           learning=None,
                                           sess=sess)

        # Create attack
        copycat_cnn = CopycatCNN(classifier=victim_tfc,
                                 batch_size_query=BATCH_SIZE,
                                 batch_size_fit=BATCH_SIZE,
                                 nb_epochs=NB_EPOCHS,
                                 nb_stolen=NB_STOLEN)
        thieved_tfc = copycat_cnn.extract(x=self.x_train,
                                          thieved_classifier=thieved_tfc)

        victim_preds = np.argmax(victim_tfc.predict(x=self.x_train[:100]),
                                 axis=1)
        thieved_preds = np.argmax(thieved_tfc.predict(x=self.x_train[:100]),
                                  axis=1)
        acc = np.sum(victim_preds == thieved_preds) / len(victim_preds)

        self.assertGreater(acc, 0.3)

        # Clean-up session
        sess.close()
        tf.reset_default_graph()
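The test above relies on names defined elsewhere in its module (the NumPy/TensorFlow imports, the BATCH_SIZE, NB_EPOCHS and NB_STOLEN constants, and `self.x_train`). A minimal sketch of that surrounding setup, with the constant values and import paths as assumptions since they are not shown here:

import unittest

import numpy as np
import tensorflow as tf

if tf.__version__[0] == "2":
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

from art.attacks.extraction import CopycatCNN  # import path varies across ART versions
from art.classifiers import TensorFlowClassifier
from art.utils import load_dataset

# Assumed values; the real constants live elsewhere in the test module.
BATCH_SIZE = 100
NB_EPOCHS = 10
NB_STOLEN = 1000


class TestCopycatCNN(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Load MNIST as NHWC arrays scaled to [0, 1] and keep a small slice.
        (x_train, _), (_, _), _, _ = load_dataset("mnist")
        cls.x_train = x_train[:NB_STOLEN]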
Example 2
    def test_iris_tf(self):
        """
        First test for TF.
        :return:
        """
        # Get the TF classifier
        victim_tfc, sess = get_iris_classifier_tf()

        # Define input and output placeholders
        input_ph = tf.placeholder(tf.float32, shape=[None, 4])
        output_ph = tf.placeholder(tf.int32, shape=[None, 3])

        # Define the TensorFlow graph
        dense1 = tf.layers.dense(input_ph, 10)
        dense2 = tf.layers.dense(dense1, 10)
        logits = tf.layers.dense(dense2, 3)

        # Train operator
        loss = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(logits=logits,
                                            onehot_labels=output_ph))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train = optimizer.minimize(loss)

        # TensorFlow session and initialization
        sess.run(tf.global_variables_initializer())

        # Create the thieved classifier
        thieved_tfc = TensorFlowClassifier(clip_values=(0, 1),
                                           input_ph=input_ph,
                                           output=logits,
                                           labels_ph=output_ph,
                                           train=train,
                                           loss=loss,
                                           learning=None,
                                           sess=sess,
                                           channel_index=1)

        # Create attack
        copycat_cnn = CopycatCNN(classifier=victim_tfc,
                                 batch_size_fit=BATCH_SIZE,
                                 batch_size_query=BATCH_SIZE,
                                 nb_epochs=NB_EPOCHS,
                                 nb_stolen=NB_STOLEN)
        thieved_tfc = copycat_cnn.extract(x=self.x_train,
                                          thieved_classifier=thieved_tfc)

        victim_preds = np.argmax(victim_tfc.predict(x=self.x_train[:100]),
                                 axis=1)
        thieved_preds = np.argmax(thieved_tfc.predict(x=self.x_train[:100]),
                                  axis=1)
        acc = np.sum(victim_preds == thieved_preds) / len(victim_preds)

        self.assertGreater(acc, 0.3)

        # Clean-up session
        sess.close()
        tf.reset_default_graph()
Example 3
def cnn_mnist_tf(input_shape):
    """Build an untrained TensorFlowClassifier for MNIST-shaped inputs."""
    labels_tf = tf.placeholder(tf.float32, [None, 10])
    inputs_tf = tf.placeholder(tf.float32, [None] + list(input_shape))

    # Define the TensorFlow graph
    conv = tf.layers.conv2d(inputs_tf, 4, 5, activation=tf.nn.relu)
    conv = tf.layers.max_pooling2d(conv, 2, 2)
    fc = tf.layers.flatten(conv)

    # Logits layer
    logits = tf.layers.dense(fc, 10)

    # Train operator
    loss = tf.reduce_mean(
        tf.losses.softmax_cross_entropy(logits=logits,
                                        onehot_labels=labels_tf))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_tf = optimizer.minimize(loss)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    classifier = TensorFlowClassifier(clip_values=(0, 1),
                                      input_ph=inputs_tf,
                                      output=logits,
                                      loss=loss,
                                      train=train_tf,
                                      labels_ph=labels_tf,
                                      sess=sess)
    return classifier
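`cnn_mnist_tf` returns an untrained classifier, so it needs to be fitted before it is useful. A brief usage sketch, assuming MNIST is loaded through `art.utils.load_dataset` as in the other examples:

from art.utils import load_dataset

# MNIST as NHWC arrays scaled to [0, 1].
(x_train, y_train), (x_test, y_test), _, _ = load_dataset("mnist")

# Build the classifier for 28x28 grayscale images and train it briefly.
classifier = cnn_mnist_tf((28, 28, 1))
classifier.fit(x_train, y_train, batch_size=128, nb_epochs=2)

# Predict on a small test slice.
predictions = classifier.predict(x_test[:100])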
Example 4
    def _create_tfclassifier():
        """
        To create a simple TensorFlowClassifier for testing.
        :return:
        """
        import tensorflow as tf

        if tf.__version__[0] == "2":
            import tensorflow.compat.v1 as tf

            tf.disable_eager_execution()

        # Define input and output placeholders
        input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        labels_ph = tf.placeholder(tf.int32, shape=[None, 10])

        # Define the TensorFlow graph
        conv = tf.layers.conv2d(input_ph, 4, 5, activation=tf.nn.relu)
        conv = tf.layers.max_pooling2d(conv, 2, 2)
        fc = tf.layers.flatten(conv)

        # Logits layer
        logits = tf.layers.dense(fc, 10)

        # Train operator
        loss = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(logits=logits,
                                            onehot_labels=labels_ph))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train = optimizer.minimize(loss)

        # TensorFlow session and initialization
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        # Create the classifier
        tfc = TensorFlowClassifier(
            input_ph=input_ph,
            output=logits,
            labels_ph=labels_ph,
            train=train,
            loss=loss,
            learning=None,
            sess=sess,
            clip_values=(0, 1),
        )

        return tfc
Example 5
def get_classifier_tf(from_logits=False):
    """
    Standard TensorFlow classifier for unit testing.

    The following hyper-parameters were used to obtain the weights and biases:
    learning_rate: 0.01
    batch size: 10
    number of epochs: 2
    optimizer: tf.train.AdamOptimizer

    :return: TensorFlowClassifier, tf.Session()
    """
    # pylint: disable=E0401
    import tensorflow as tf
    if tf.__version__[0] == '2':
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
    from art.classifiers import TensorFlowClassifier

    # Define input and output placeholders
    input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    output_ph = tf.placeholder(tf.int32, shape=[None, 10])

    # Define the TensorFlow graph
    conv = tf.layers.conv2d(
        input_ph,
        1,
        7,
        activation=tf.nn.relu,
        kernel_initializer=_tf_weights_loader('MNIST', 'W', 'CONV2D'),
        bias_initializer=_tf_weights_loader('MNIST', 'B', 'CONV2D'))
    conv = tf.layers.max_pooling2d(conv, 4, 4)
    flattened = tf.layers.flatten(conv)

    # Logits layer
    logits = tf.layers.dense(
        flattened,
        10,
        kernel_initializer=_tf_weights_loader('MNIST', 'W', 'DENSE'),
        bias_initializer=_tf_weights_loader('MNIST', 'B', 'DENSE'))

    # probabilities
    probabilities = tf.keras.activations.softmax(x=logits)

    # Train operator
    loss = tf.reduce_mean(
        tf.losses.softmax_cross_entropy(logits=logits,
                                        onehot_labels=output_ph))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train = optimizer.minimize(loss)

    # TensorFlow session and initialization
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Create the classifier
    if from_logits:
        tfc = TensorFlowClassifier(clip_values=(0, 1),
                                   input_ph=input_ph,
                                   output=logits,
                                   labels_ph=output_ph,
                                   train=train,
                                   loss=loss,
                                   learning=None,
                                   sess=sess)
    else:
        tfc = TensorFlowClassifier(clip_values=(0, 1),
                                   input_ph=input_ph,
                                   output=probabilities,
                                   labels_ph=output_ph,
                                   train=train,
                                   loss=loss,
                                   learning=None,
                                   sess=sess)

    return tfc, sess
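The `_tf_weights_loader` helper used for the kernel and bias initializers is not shown in these snippets. In ART's test utilities it returns a TensorFlow initializer that reads pre-trained weights from disk; a rough sketch of such a factory, with the file naming and location as assumptions:

import os

import numpy as np


def _tf_weights_loader(dataset, weight_type, layer):
    # Assumed naming convention, e.g. "W_CONV2D_MNIST.npy", stored next to the test utilities.
    filename = "{}_{}_{}.npy".format(weight_type, layer, dataset)

    def _initializer(shape, dtype=None, partition_info=None):
        import tensorflow as tf
        weights = np.load(os.path.join(os.path.dirname(__file__), "resources", filename))
        return tf.constant(weights, dtype)

    return _initializer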
Example 6
def get_iris_classifier_tf():
    """
    Standard TensorFlow classifier for unit testing.

    The following hyper-parameters were used to obtain the weights and biases:

    * learning_rate: 0.01
    * batch size: 5
    * number of epochs: 200
    * optimizer: tf.train.AdamOptimizer

    The model is trained on 70% of the dataset, and 30% of the training set is used as a validation split.

    :return: The trained model for Iris dataset and the session.
    :rtype: `tuple(TensorFlowClassifier, tf.Session)`
    """
    import tensorflow as tf
    if tf.__version__[0] == '2':
        # pylint: disable=E0401
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
    from art.classifiers import TensorFlowClassifier

    # Define input and output placeholders
    input_ph = tf.placeholder(tf.float32, shape=[None, 4])
    output_ph = tf.placeholder(tf.int32, shape=[None, 3])

    # Define the TensorFlow graph
    dense1 = tf.layers.dense(
        input_ph,
        10,
        kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE1'),
        bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE1'))
    dense2 = tf.layers.dense(
        dense1,
        10,
        kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE2'),
        bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE2'))
    logits = tf.layers.dense(
        dense2,
        3,
        kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE3'),
        bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE3'))

    # Train operator
    loss = tf.reduce_mean(
        tf.losses.softmax_cross_entropy(logits=logits,
                                        onehot_labels=output_ph))

    # TensorFlow session and initialization
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Create the classifier
    tfc = TensorFlowClassifier(clip_values=(0, 1),
                               input_ph=input_ph,
                               output=logits,
                               labels_ph=output_ph,
                               train=None,
                               loss=loss,
                               learning=None,
                               sess=sess,
                               channel_index=1)

    return tfc, sess
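No train operator is defined here, so the returned classifier is meant for inference with the pre-loaded weights only. A short usage sketch, assuming the Iris data comes from `art.utils.load_dataset`:

import numpy as np
import tensorflow as tf

if tf.__version__[0] == '2':
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

from art.utils import load_dataset

# Iris features and one-hot labels, as used elsewhere in these tests.
(x_train, y_train), (_, _), _, _ = load_dataset("iris")

tfc, sess = get_iris_classifier_tf()
preds = np.argmax(tfc.predict(x_train[:10]), axis=1)

# Clean up the session and graph, as the tests above do.
sess.close()
tf.reset_default_graph()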
Example 7
def get_tabular_classifier_tf_v1(load_init=True, sess=None):
    """
    Standard TensorFlow classifier for unit testing.

    The following hyper-parameters were used to obtain the weights and biases:

    * learning_rate: 0.01
    * batch size: 5
    * number of epochs: 200
    * optimizer: tf.train.AdamOptimizer

    The model is trained on 70% of the dataset, and 30% of the training set is used as a validation split.

    :param load_init: Load the initial weights if True.
    :type load_init: `bool`
    :param sess: Computation session.
    :type sess: `tf.Session`
    :return: The trained model for Iris dataset and the session.
    :rtype: `tuple(TensorFlowClassifier, tf.Session)`
    """
    import tensorflow as tf

    if tf.__version__[0] == "2":
        # pylint: disable=E0401
        import tensorflow.compat.v1 as tf

        tf.disable_eager_execution()
    from art.classifiers import TensorFlowClassifier

    # Define input and output placeholders
    input_ph = tf.placeholder(tf.float32, shape=[None, 4])
    output_ph = tf.placeholder(tf.int32, shape=[None, 3])

    # Define the TensorFlow graph
    if load_init:
        dense1 = tf.layers.dense(
            input_ph,
            10,
            kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE1"),
            bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE1"),
        )
        dense2 = tf.layers.dense(
            dense1,
            10,
            kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE2"),
            bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE2"),
        )
        logits = tf.layers.dense(
            dense2,
            3,
            kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE3"),
            bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE3"),
        )
    else:
        dense1 = tf.layers.dense(input_ph, 10)
        dense2 = tf.layers.dense(dense1, 10)
        logits = tf.layers.dense(dense2, 3)

    # Train operator
    loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train = optimizer.minimize(loss)

    # TensorFlow session and initialization
    if sess is None:
        sess = tf.Session()
    elif not isinstance(sess, tf.Session):
        raise TypeError("An instance of `tf.Session` should be passed to `sess`.")

    sess.run(tf.global_variables_initializer())

    # Create the classifier
    tfc = TensorFlowClassifier(
        clip_values=(0, 1),
        input_ph=input_ph,
        output=logits,
        labels_ph=output_ph,
        train=train,
        loss=loss,
        learning=None,
        sess=sess,
        channel_index=1,
    )

    return tfc, sess
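The `sess` parameter lets several helpers share one computation session instead of each creating its own. A minimal sketch of that pattern, using `load_init=False` so the weights stay randomly initialized:

import tensorflow as tf

if tf.__version__[0] == "2":
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

# Create the session up front and hand it to the factory.
shared_sess = tf.Session()
tfc, returned_sess = get_tabular_classifier_tf_v1(load_init=False, sess=shared_sess)
assert returned_sess is shared_sess

# One close is enough, since no extra session was created internally.
shared_sess.close()
tf.reset_default_graph()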
Example 8
def get_image_classifier_tf_v1(from_logits=False, load_init=True, sess=None):
    """
    Standard TensorFlow classifier for unit testing.

    The following hyper-parameters were used to obtain the weights and biases:
    learning_rate: 0.01
    batch size: 10
    number of epochs: 2
    optimizer: tf.train.AdamOptimizer

    :param from_logits: Flag if model should predict logits (True) or probabilities (False).
    :type from_logits: `bool`
    :param load_init: Load the initial weights if True.
    :type load_init: `bool`
    :param sess: Computation session.
    :type sess: `tf.Session`
    :return: TensorFlowClassifier, tf.Session()
    """
    # pylint: disable=E0401
    import tensorflow as tf

    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    if tf.__version__[0] == "2":
        import tensorflow.compat.v1 as tf

        tf.disable_eager_execution()
    from art.classifiers import TensorFlowClassifier

    # Define input and output placeholders
    input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    output_ph = tf.placeholder(tf.float32, shape=[None, 10])

    # Define the TensorFlow graph
    if load_init:
        conv = tf.layers.conv2d(
            input_ph,
            1,
            7,
            activation=tf.nn.relu,
            kernel_initializer=_tf_weights_loader("MNIST", "W", "CONV2D"),
            bias_initializer=_tf_weights_loader("MNIST", "B", "CONV2D"),
        )
    else:
        conv = tf.layers.conv2d(input_ph, 1, 7, activation=tf.nn.relu)

    conv = tf.layers.max_pooling2d(conv, 4, 4)
    flattened = tf.layers.flatten(conv)

    # Logits layer
    if load_init:
        logits = tf.layers.dense(
            flattened,
            10,
            kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE"),
            bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE"),
        )
    else:
        logits = tf.layers.dense(flattened, 10)

    # probabilities
    probabilities = tf.keras.activations.softmax(x=logits)

    # Train operator
    loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train = optimizer.minimize(loss)

    # TensorFlow session and initialization
    if sess is None:
        sess = tf.Session()
    elif not isinstance(sess, tf.Session):
        raise TypeError("An instance of `tf.Session` should be passed to `sess`.")

    sess.run(tf.global_variables_initializer())

    # Create the classifier
    if from_logits:
        tfc = TensorFlowClassifier(
            clip_values=(0, 1),
            input_ph=input_ph,
            output=logits,
            labels_ph=output_ph,
            train=train,
            loss=loss,
            learning=None,
            sess=sess,
        )
    else:
        tfc = TensorFlowClassifier(
            clip_values=(0, 1),
            input_ph=input_ph,
            output=probabilities,
            labels_ph=output_ph,
            train=train,
            loss=loss,
            learning=None,
            sess=sess,
        )

    return tfc, sess
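The `from_logits` flag only changes which tensor is exposed as the classifier output; applying a softmax to the logits variant should reproduce what the probability variant returns. A sketch of that check, with the MNIST test array assumed to come from `art.utils.load_dataset` as in the other examples:

import numpy as np
import tensorflow as tf

if tf.__version__[0] == "2":
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

from art.utils import load_dataset

(_, _), (x_test, _), _, _ = load_dataset("mnist")

# Logits variant: raw pre-softmax scores.
tfc_logits, sess = get_image_classifier_tf_v1(from_logits=True)
logits = tfc_logits.predict(x_test[:16])

# Softmax over the class dimension recovers the probabilities that the
# from_logits=False variant would return.
probs = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
preds = np.argmax(probs, axis=1)

sess.close()
tf.reset_default_graph()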