def test_make_confidence_report_bundled():
  """
  A very simple test that just makes sure make_confidence_report_bundled
  can run without crashing
  """
  sess = tf.compat.v1.Session()
  try:
    nb_classes = 3
    nb_features = 2
    batch_size = 5
    nb_test_examples = batch_size * 2
    layer = Linear(num_hid=nb_classes)
    model = MLP(layers=[layer], input_shape=(None, nb_features))
    dataset = SimpleDataset(test_end=nb_test_examples, nb_classes=nb_classes)
    model.dataset_factory = dataset.get_factory()
    filepath = ".test_model.joblib"
    with sess.as_default():
      sess.run(tf.compat.v1.global_variables_initializer())
      serial.save(filepath, model)

    def recipe(sess, model, x, y, nb_classes, eps, clip_min, clip_max,
               eps_iter, nb_iter, report_path, eps_iter_small, batch_size):
      """
      Mock recipe that just runs the Noise attack so the test runs fast
      """
      attack_configs = [AttackConfig(Noise(model, sess), {'eps': eps})]
      new_work_goal = {config: 1 for config in attack_configs}
      goals = [Misclassify(new_work_goal=new_work_goal)]
      bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
                     attack_batch_size=batch_size, eval_batch_size=batch_size)

    make_confidence_report_bundled(filepath, test_end=nb_test_examples,
                                   recipe=recipe, base_eps=.1,
                                   base_eps_iter=.01, batch_size=batch_size)
  finally:
    sess.close()
def test_no_drop():
  """test_no_drop: Make sure dropout does nothing by default
  (so it does not cause stochasticity at test time)"""
  model = MLP(input_shape=[1, 1], layers=[Dropout(name='output')])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output')
  sess = tf.Session()
  # Do multiple runs because dropout is stochastic
  for _ in range(10):
    y_value = sess.run(y)
    assert y_value == 1.
def setUp(self):
  super(TestPerImageStandardize, self).setUp()

  self.input_shape = (128, 32, 32, 3)
  self.sess = tf.Session()
  self.model = MLP(input_shape=self.input_shape,
                   layers=[PerImageStandardize(name='output')])

  self.x = tf.placeholder(shape=self.input_shape, dtype=tf.float32)
  self.y = self.model.get_layer(self.x, 'output')

  self.y_true = tf.map_fn(
      lambda ex: tf.image.per_image_standardization(ex), self.x)
def setUp(self): """ Set up session and build model graph """ super(TestPerImageStandardize, self).setUp() self.input_shape = (128, 32, 32, 3) self.sess = tf.Session() self.model = MLP(input_shape=self.input_shape, layers=[PerImageStandardize(name="output")]) self.x = tf.placeholder(shape=self.input_shape, dtype=tf.float32) self.y = self.model.get_layer(self.x, "output") self.y_true = tf.map_fn(tf.image.per_image_standardization, self.x)
class TestPerImageStandardize(CleverHansTest):
  def setUp(self):
    super(TestPerImageStandardize, self).setUp()

    self.input_shape = (128, 32, 32, 3)
    self.sess = tf.Session()
    self.model = MLP(input_shape=self.input_shape,
                     layers=[PerImageStandardize(name='output')])

    self.x = tf.placeholder(shape=self.input_shape, dtype=tf.float32)
    self.y = self.model.get_layer(self.x, 'output')

    self.y_true = tf.map_fn(
        lambda ex: tf.image.per_image_standardization(ex), self.x)

  def run_and_check_output(self, x):
    y, y_true = self.sess.run([self.y, self.y_true],
                              feed_dict={self.x: x})
    self.assertClose(y, y_true)

  def test_random_inputs(self):
    x = np.random.rand(*self.input_shape)
    self.run_and_check_output(x)

  def test_uniform_inputs(self):
    x = np.ones(self.input_shape)
    self.run_and_check_output(x)
def test_drop():
  """test_drop: Make sure dropout is activated successfully"""

  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so zero or
  # infinitesimal include_prob causes NaNs.
  # 1e-8 does not cause NaNs and shouldn't be a significant source
  # of test flakiness relative to dependency downloads failing, etc.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output', dropout=True)
  sess = tf.Session()
  y_value = sess.run(y)
  # Subject to very rare random failure because include_prob is not exact 0
  assert y_value == 0., y_value
def test_override():
  """test_override: Make sure dropout_dict changes dropout probabilities successfully"""

  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so zero or
  # infinitesimal include_prob causes NaNs.
  # For this test, random failure to drop will not cause the test to fail.
  # The stochastic version should not even run if everything is working
  # right.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  dropout_dict = {'output': 1.}
  y = model.get_layer(x, 'output', dropout=True, dropout_dict=dropout_dict)
  sess = tf.Session()
  y_value = sess.run(y)
  assert y_value == 1., y_value
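# A minimal NumPy sketch (not part of the test suite above) of why those tests
# use include_prob=1e-8 rather than 0: inverted dropout rescales surviving
# units by 1 / include_prob, so an include_prob of exactly 0 would divide by
# zero and produce NaNs. The function and variable names below are
# hypothetical and only illustrate the scaling behaviour.
import numpy as np

def inverted_dropout(x, include_prob, rng=np.random):
  """Keep each unit with probability include_prob, rescaling survivors."""
  mask = rng.uniform(size=x.shape) < include_prob
  # Division by include_prob is what motivates 1e-8 over an exact 0.
  return x * mask / include_prob

example = np.ones((1, 4), dtype=np.float32)
print(inverted_dropout(example, include_prob=0.5))   # survivors become 2.0
print(inverted_dropout(example, include_prob=1e-8))  # almost surely all zeros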
def make_basic_picklable_cnn(nb_filters=64, nb_classes=10,
                             input_shape=(None, 32, 32, 3)):
  """The model for the picklable models tutorial.
  """
  if VERSION == 1:
    layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"),
              ReLU(),
              Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"),
              ReLU(),
              Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"),
              ReLU(),
              Flatten(),
              Linear(nb_classes),
              Softmax()]
    model = MLP(layers, input_shape)
  else:
    layers = [PerImageStandardize(),
              Conv2D(nb_filters, (3, 3), (1, 1), "SAME"),
              ReLU(),
              ResidualWithInstanceNorm(nb_filters, 2),
              ResidualWithInstanceNorm(nb_filters, 1),
              ResidualWithInstanceNorm(nb_filters * 2, 2),
              ResidualWithInstanceNorm(nb_filters * 2, 1),
              ResidualWithInstanceNorm(nb_filters * 4, 2),
              ResidualWithInstanceNorm(nb_filters * 4, 1),
              ResidualWithInstanceNorm(nb_filters * 8, 2),
              ResidualWithInstanceNorm(nb_filters * 8, 1),
              GlobalAveragePool(),
              Linear(nb_classes),
              Softmax()]
    model = MLP(layers, input_shape)
  return model
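# Hedged usage sketch (not from the tutorial itself): how a model built by
# make_basic_picklable_cnn might be serialized for later reuse. It assumes the
# usual TF1-style graph mode; get_logits is the standard CleverHans Model-API
# call, and serial.save is the same helper used in the bundled-report test
# above. The placeholder shape and output file name are illustrative only.
model = make_basic_picklable_cnn()
x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
logits = model.get_logits(x)  # building the graph also creates the variables

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Picklable models carry their weights with the object.
  serial.save("basic_picklable_cnn.joblib", model)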
def get_model(self, scope):
  """The model for the picklable models tutorial.
  """
  if self.dataset_name == 'MNIST':
    nb_filters = 64
    nb_classes = self.nb_classes
    input_shape = (None, 28, 28, 1)
    layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"),
              ReLU(),
              Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"),
              ReLU(),
              Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"),
              ReLU(),
              Flatten(),
              Linear(nb_classes),
              Softmax()]
    model = MLP(layers, input_shape)
  elif self.dataset_name == 'SVHN':
    nb_filters = 64
    nb_classes = self.nb_classes
    input_shape = (None, 32, 32, 3)
    layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"),
              ReLU(),
              Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"),
              ReLU(),
              Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"),
              ReLU(),
              Flatten(),
              Linear(nb_classes),
              Softmax()]
    model = MLP(layers, input_shape)
  elif self.dataset_name == 'CIFAR10':
    model = make_wresnet(scope=scope)
  return model
def make_basic_picklable_cnn(nb_filters=64, nb_classes=10,
                             input_shape=(None, 28, 28, 1)):
  """The model for the picklable models tutorial.
  """
  layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"),
            ReLU(),
            Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"),
            ReLU(),
            Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"),
            ReLU(),
            Flatten(),
            Linear(nb_classes),
            Softmax()]
  model = MLP(layers, input_shape)
  return model
def make_basic_picklable_substitute(nb_filters=200, nb_classes=2,
                                    input_shape=(None, 28, 28, 1)):
  """The model for the picklable models tutorial.
  """
  layers = [Flatten(),
            Linear(nb_filters),
            ReLU(),
            Linear(nb_filters),
            ReLU(),
            Linear(nb_classes),
            Softmax()]
  model = MLP(layers, input_shape)
  return model
class TestPerImageStandardize(CleverHansTest): """ Tests for the PerImageStandardize class. """ def setUp(self): """ Set up session and build model graph """ super(TestPerImageStandardize, self).setUp() self.input_shape = (128, 32, 32, 3) self.sess = tf.compat.v1.Session() self.model = MLP(input_shape=self.input_shape, layers=[PerImageStandardize(name='output')]) self.x = tf.compat.v1.placeholder(shape=self.input_shape, dtype=tf.float32) self.y = self.model.get_layer(self.x, 'output') self.y_true = tf.map_fn(tf.image.per_image_standardization, self.x) def run_and_check_output(self, x): """ Make sure y and y_true evaluate to the same value """ y, y_true = self.sess.run([self.y, self.y_true], feed_dict={self.x: x}) self.assertClose(y, y_true) def test_random_inputs(self): """ Test on random inputs """ x = np.random.rand(*self.input_shape) self.run_and_check_output(x) def test_ones_inputs(self): """ Test with input set to all ones. """ x = np.ones(self.input_shape) self.run_and_check_output(x)
def model_train(file_name=FILE_NAME):
  """
  Creates the joblib file of the AllConvolutional model trained over the CIFAR-10 dataset.

  Parameters
  ----------
  file_name: str, optional
      The name of the joblib file.
  """
  layers = [Conv2D(64, (3, 3), (1, 1), "SAME"),
            ReLU(),
            Conv2D(128, (3, 3), (1, 1), "SAME"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Conv2D(128, (3, 3), (1, 1), "SAME"),
            ReLU(),
            Conv2D(256, (3, 3), (1, 1), "SAME"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Conv2D(256, (3, 3), (1, 1), "SAME"),
            ReLU(),
            Conv2D(512, (3, 3), (1, 1), "SAME"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Conv2D(10, (3, 3), (1, 1), "SAME"),
            GlobalAveragePool(),
            Softmax()]

  model = MLP(layers, (None, 32, 32, 3))

  cifar10 = CIFAR10(train_start=0, train_end=50000, test_start=0, test_end=10000)
  x_train, y_train = cifar10.get_set('train')
  x_test, y_test = cifar10.get_set('test')
  y_train = y_train.reshape((50000, 10))
  y_test = y_test.reshape((10000, 10))

  model_training(model, file_name, x_train, y_train, x_test, y_test,
                 nb_epochs=10, batch_size=128, learning_rate=.001,
                 label_smoothing=0.1)
def model_train(file_name=FILE_NAME):
  """
  Creates the joblib file of LeNet-5 trained over the MNIST dataset.

  Parameters
  ----------
  file_name: str, optional
      The name of the joblib file.
  """
  layers = [Conv2D(20, (5, 5), (1, 1), "VALID"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Conv2D(50, (5, 5), (1, 1), "VALID"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Flatten(),
            Linear(500),
            ReLU(),
            Linear(10),
            Softmax()]

  model = MLP(layers, (None, 28, 28, 1))

  mnist = MNIST(train_start=0, train_end=60000, test_start=0, test_end=10000)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')

  model_training(model, file_name, x_train, y_train, x_test, y_test,
                 nb_epochs=20, batch_size=128, learning_rate=0.001)
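# Hedged usage sketch: how the LeNet-5 training entry point above might be
# invoked and its output reloaded for evaluation. It assumes FILE_NAME points
# at the joblib file written by model_training and that the serial module used
# elsewhere in this code also provides a load counterpart to serial.save; the
# TF1-style session handling and placeholder shape are illustrative only.
model_train()  # writes FILE_NAME via model_training

with tf.Session() as sess:
  restored = serial.load(FILE_NAME)
  x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
  logits = restored.get_logits(x)  # ready for clean or adversarial evaluation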
def model_train(attack):
  """
  Creates the joblib file of LeNet-5 trained over the augmented MNIST dataset.

  Parameters
  ----------
  attack: str
      The augmented dataset used (either "jsma", "wjsma" or "tjsma").
  """
  layers = [Conv2D(20, (5, 5), (1, 1), "VALID"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Conv2D(50, (5, 5), (1, 1), "VALID"),
            ReLU(),
            MaxPooling2D((2, 2), (2, 2), "VALID"),
            Flatten(),
            Linear(500),
            ReLU(),
            Linear(10),
            Softmax()]

  model = MLP(layers, (None, 28, 28, 1))

  mnist = MNIST(train_start=TRAIN_START, train_end=TRAIN_END,
                test_start=TEST_START, test_end=TEST_END)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')

  x_add = np.load("defense/augmented/" + attack + "_x.npy")[:AUGMENT_SIZE]
  y_add = np.load("defense/augmented/" + attack + "_y.npy")[:AUGMENT_SIZE]

  x_train = np.concatenate((x_train, x_add.reshape(x_add.shape + (1,))),
                           axis=0).astype(np.float32)
  y_train = np.concatenate((y_train, y_add), axis=0).astype(np.float32)

  model_training(model, "mnist_defense_" + attack + ".joblib",
                 x_train, y_train, x_test, y_test,
                 nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
                 learning_rate=LEARNING_RATE)
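# Hedged usage sketch: the docstring above names the three supported augmented
# datasets, so a small driver could train one defended model per attack. The
# loop below is illustrative only; it calls model_train with each documented
# option and writes mnist_defense_<attack>.joblib for each.
for attack_name in ("jsma", "wjsma", "tjsma"):
  model_train(attack_name)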