# Assumed imports for this section: the layer classes and MLP come from the
# CleverHans picklable model API; DefenseMLP is taken to be an MLP subclass
# defined elsewhere in this codebase.
import tensorflow as tf

from cleverhans.picklable_model import (Conv2D, Dropout, Flatten, Linear,
                                        MLP, ReLU, Softmax)


def model_z(nb_filters=32, nb_classes=10, input_shape=(None, 28, 28, 1)):
  layers = [Conv2D(nb_filters, (3, 3), (1, 1), "SAME"),
            ReLU(),
            Conv2D(nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Conv2D(2 * nb_filters, (3, 3), (1, 1), "VALID"),
            ReLU(),
            Conv2D(2 * nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Conv2D(4 * nb_filters, (3, 3), (1, 1), "VALID"),
            ReLU(),
            Conv2D(4 * nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Flatten(),
            Linear(600),
            ReLU(),
            Dropout(0.5),
            Linear(600),
            ReLU(),
            Dropout(0.5),
            Linear(nb_classes),
            Softmax()]
  model = DefenseMLP(layers, input_shape)
  return model


def model_c(nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1)):
  layers = [Conv2D(nb_filters * 2, (3, 3), (1, 1), "SAME", use_bias=True),
            ReLU(),
            Conv2D(nb_filters, (5, 5), (2, 2), "VALID", use_bias=True),
            ReLU(),
            Flatten(),
            Dropout(0.25),
            Linear(128),
            ReLU(),
            Dropout(0.5),
            Linear(nb_classes),
            Softmax()]
  model = DefenseMLP(layers, input_shape)
  return model


def test_no_drop():
  """test_no_drop: Make sure dropout does nothing by default
  (so it does not cause stochasticity at test time)"""

  model = MLP(input_shape=[1, 1], layers=[Dropout(name='output')])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output')
  sess = tf.Session()
  # Do multiple runs because dropout is stochastic
  for _ in range(10):
    y_value = sess.run(y)
    assert y_value == 1.
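

# A minimal illustrative sketch (an assumption, not part of the original
# tests): the deterministic default path above matches tf.nn.dropout with
# keep_prob == 1.0, where every element is kept and the 1 / keep_prob
# rescaling is a no-op, so the output equals the input.
def _sketch_identity_dropout():
  x = tf.constant([[1., 2., 3.]])
  y = tf.nn.dropout(x, keep_prob=1.0)  # keeps all entries, scales by 1 / 1.0
  with tf.Session() as sess:
    print(sess.run(y))  # [[1. 2. 3.]]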


def model_y(nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1)):
  layers = [Conv2D(nb_filters, (3, 3), (1, 1), "SAME"),
            ReLU(),
            Conv2D(nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Conv2D(2 * nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Conv2D(2 * nb_filters, (3, 3), (2, 2), "VALID"),
            ReLU(),
            Flatten(),
            Linear(256),
            ReLU(),
            Dropout(0.5),
            Linear(256),
            ReLU(),
            Dropout(0.5),
            Linear(nb_classes),
            Softmax()]
  model = DefenseMLP(layers, input_shape, feature_layer='ReLU13')
  return model


def test_drop():
  """test_drop: Make sure dropout is activated successfully"""

  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so zero or
  # infinitesimal include_prob causes NaNs.
  # 1e-8 does not cause NaNs and shouldn't be a significant source
  # of test flakiness relative to dependency downloads failing, etc.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output', dropout=True)
  sess = tf.Session()
  y_value = sess.run(y)
  # Subject to very rare random failure because include_prob is not exact 0
  assert y_value == 0., y_value
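

# A minimal illustrative sketch (an assumption, not part of the original
# tests) of the NaN hazard described above: tf.nn.dropout rescales kept
# activations by 1 / keep_prob, so keep_prob == 0 would divide by zero,
# while a tiny keep_prob such as 1e-8 stays finite.
def _sketch_dropout_rescaling():
  x = tf.ones([4])
  y = tf.nn.dropout(x, keep_prob=0.5)  # kept entries become 1 / 0.5 == 2.0
  with tf.Session() as sess:
    print(sess.run(y))  # a random mix of 0.0 and 2.0 entries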


def test_override():
  """test_override: Make sure dropout_dict changes dropout probabilities
  successfully."""

  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so zero or
  # infinitesimal include_prob causes NaNs.
  # For this test, random failure to drop will not cause the test to fail.
  # The stochastic version should not even run if everything is working
  # right.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  dropout_dict = {'output': 1.}
  y = model.get_layer(x, 'output', dropout=True, dropout_dict=dropout_dict)
  sess = tf.Session()
  y_value = sess.run(y)
  assert y_value == 1., y_value


def model_d(input_shape=(None, 28, 28, 1), nb_classes=10):
  """
  Defines the model architecture to be used by the substitute.
  Uses the example model interface.
  :param input_shape: shape of the model input (batch, rows, cols, channels)
  :param nb_classes: number of classes in output
  :return: tensorflow model
  """
  # Define a fully connected model (it's different than the black-box)
  layers = [Flatten(),
            Linear(200),
            ReLU(),
            Dropout(0.5),
            Linear(200),
            ReLU(),
            Linear(nb_classes),
            Softmax()]
  return DefenseMLP(layers, input_shape)
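

# A minimal usage sketch, assuming DefenseMLP follows the CleverHans Model
# interface (get_probs is an assumption here; it does not appear above):
def _sketch_substitute_usage():
  x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
  model = model_d()
  probs = model.get_probs(x)  # symbolic softmax output of the substitute
  print(probs.get_shape())  # (?, 10): one probability per class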