    # Excerpt from a test class; assumes TensorFlow (tf), MLPnGPU,
    # LayernGPU, and MadryEtAlMultiGPU are imported at module level.
    def setUp(self):
        super(TestMadryEtAlMultiGPU, self).setUp()

        class SimpleLayer(LayernGPU):
            def __init__(self):
                super(SimpleLayer, self).__init__()

            def set_input_shape(self, input_shape):
                self.input_shape = input_shape
                self.output_shape = input_shape
                # Fixed 2x2 weights keep the toy model deterministic.
                self.W1 = tf.constant([[1.5, 0.3], [-2.0, 0.3]],
                                      dtype=tf.float32)
                self.W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                                      dtype=tf.float32)

            def fprop_noscope(self, x):
                # Toy two-layer network: sigmoid hidden layer, linear output.
                h1 = tf.nn.sigmoid(tf.matmul(x, self.W1))
                res = tf.matmul(h1, self.W2)
                return res

        input_shape = (None, 2)
        self.model_ngpu = MLPnGPU([SimpleLayer()], input_shape)

        # self.attack is the single-GPU attack built by the parent class's
        # setUp; keep a handle on it, then make the multi-GPU version the
        # attack under test.
        self.attack_single_gpu = self.attack
        self.attack_multi_gpu = MadryEtAlMultiGPU(self.model_ngpu,
                                                  sess=self.sess)
        self.attack = self.attack_multi_gpu
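
    # A minimal sketch (not part of the original test) of how a test method
    # could check that the single- and multi-GPU attacks agree. The keyword
    # arguments follow the standard cleverhans Attack interface; rand_init
    # is disabled so both runs are deterministic, and the tolerance is
    # illustrative.
    def test_single_vs_multi_gpu_sketch(self):
        import numpy as np

        x = tf.placeholder(tf.float32, shape=(None, 2))
        x_val = np.random.rand(10, 2).astype(np.float32)

        params = {'eps': 1.0, 'eps_iter': 0.01, 'nb_iter': 10,
                  'rand_init': False}
        adv_single = self.attack_single_gpu.generate(x, **params)
        adv_multi = self.attack_multi_gpu.generate(x, **params)

        # With identical parameters and no random start, the two
        # implementations should produce matching adversarial examples.
        v_single, v_multi = self.sess.run([adv_single, adv_multi],
                                          feed_dict={x: x_val})
        np.testing.assert_allclose(v_single, v_multi, atol=1e-4)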
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):
    """
    Create a multi-GPU model similar to the basic cnn in the tutorials.
    """
    model = make_basic_cnn()
    layers = model.layers

    # MLPnGPU takes (layers, input_shape), matching its other uses here.
    model = MLPnGPU(layers, input_shape)
    return model


def make_madry_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):
    """
    Create a multi-GPU model similar to Madry et al. (arXiv:1706.06083).
    """
    layers = [Conv2DnGPU(32, (5, 5), (1, 1), "SAME"),
              ReLU(),
              MaxPool((2, 2), (2, 2), "SAME"),
              Conv2DnGPU(64, (5, 5), (1, 1), "SAME"),
              ReLU(),
              MaxPool((2, 2), (2, 2), "SAME"),
              Flatten(),
              LinearnGPU(1024),
              ReLU(),
              LinearnGPU(nb_classes),
              Softmax()]

    model = MLPnGPU(layers, input_shape)
    return model
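

# A minimal usage sketch (illustrative; not part of the original module).
# It assumes the standard cleverhans Attack interface and that
# MadryEtAlMultiGPU can be imported from an accompanying attacks_multigpu
# module; the attack parameters are placeholders.
def demo_madry_ngpu_attack():
    import numpy as np
    import tensorflow as tf
    from attacks_multigpu import MadryEtAlMultiGPU  # module path assumed

    sess = tf.Session()
    model = make_madry_ngpu(nb_classes=10)

    # Craft adversarial examples for a random MNIST-shaped batch.
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    attack = MadryEtAlMultiGPU(model, sess=sess)
    adv_x = attack.generate(x, eps=0.3, eps_iter=0.01, nb_iter=40,
                            clip_min=0., clip_max=1.)

    sess.run(tf.global_variables_initializer())
    x_val = np.random.rand(8, 28, 28, 1).astype(np.float32)
    return sess.run(adv_x, feed_dict={x: x_val})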