def __init__(self, scope, nb_classes, nb_filters, **kwargs):
    """Build the model and eagerly create its TF variables.

    :param scope: variable-scope name forwarded to Model.
    :param nb_classes: number of output classes.
    :param nb_filters: base number of convolutional filters.
    """
    # Extra keyword arguments are tolerated for call-site compatibility
    # but discarded before locals() is captured for Model's hparams.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
    self.nb_filters = nb_filters

    # Run one dummy forward pass so every variable exists immediately,
    # then keep a reference to the params so they pickle with self.
    self.fprop(self.make_input_placeholder())
    self.params = self.get_params()
def __init__(self, scope, nb_classes, nb_filters, **kwargs):
    """Build the model and force creation of its TF variables.

    :param scope: variable-scope name forwarded to Model.
    :param nb_classes: number of output classes.
    :param nb_filters: base number of convolutional filters.
    """
    # Unused keyword arguments are accepted but deliberately dropped
    # before locals() is snapshotted for Model's hparams.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
    self.nb_filters = nb_filters

    # Dummy fprop so the variables are created from the start.
    # NOTE(review): the hard-coded [128, 28, 28, 1] placeholder is
    # presumably an MNIST batch of 128 -- confirm against callers.
    self.fprop(tf.placeholder(tf.float32, [128, 28, 28, 1]))

    # Holding the params on self ensures they are pickled with the model.
    self.params = self.get_params()
def __init__(self, nb_classes=10):
    """Create the raw (unscoped) variables of the Madry-Lab MNIST model.

    :param nb_classes: number of output classes (default 10).
    """
    # NOTE: for compatibility with Madry Lab downloadable checkpoints,
    # we cannot use scopes, give these variables names, etc.
    # conv1: 5x5 kernel, 1 input channel -> 32 feature maps
    self.W_conv1 = self._weight_variable([5, 5, 1, 32])
    self.b_conv1 = self._bias_variable([32])
    # conv2: 5x5 kernel, 32 -> 64 feature maps
    self.W_conv2 = self._weight_variable([5, 5, 32, 64])
    self.b_conv2 = self._bias_variable([64])
    # fc1: the 7*7*64 flatten size implies a 28x28 input downsampled
    # twice by a factor of 2 (MNIST-shaped) -- assumed, not shown here.
    self.W_fc1 = self._weight_variable([7 * 7 * 64, 1024])
    self.b_fc1 = self._bias_variable([1024])
    # fc2: final logits layer
    self.W_fc2 = self._weight_variable([1024, nb_classes])
    self.b_fc2 = self._bias_variable([nb_classes])
    # Empty scope name and empty hparams: the variables above are
    # intentionally created outside any scope (see NOTE).
    Model.__init__(self, '', nb_classes, {})
    # "center": False -- uncentered MNIST inputs, presumably to match
    # the checkpoint's preprocessing; verify against the loader.
    self.dataset_factory = Factory(MNIST, {"center": False})
def __init__(self, nb_classes=10, nb_filters=64, dummy_input=None):
    """Build a three-conv-layer CNN and eagerly create its variables.

    :param nb_classes: number of output classes.
    :param nb_filters: base number of convolutional filters; the second
        and third conv layers use twice this many.
    :param dummy_input: optional tensor run through fprop once so all
        layer variables are created immediately. Defaults to a zeros
        tensor of shape (32, 28, 28, 1).
    """
    # Bug fix: the original signature used ``dummy_input=tf.zeros(...)``
    # as the default. Default arguments are evaluated once, at function
    # definition (module import) time, so the op was created in whatever
    # graph happened to be the default then. Build it lazily here so it
    # lives in the caller's graph instead.
    if dummy_input is None:
        dummy_input = tf.zeros((32, 28, 28, 1))

    Model.__init__(self, nb_classes=nb_classes)

    # Parameters: number of filters, number of classes.
    self.nb_filters = nb_filters
    self.nb_classes = nb_classes

    # Layer bookkeeping: names, layer objects, per-layer activations.
    self.layer_names = [
        'input', 'conv_1', 'conv_2', 'conv_3', 'flatten', 'logits'
    ]
    self.layers = {}
    self.layer_acts = {}

    # Layer definitions.
    self.layers['conv_1'] = tf.layers.Conv2D(filters=self.nb_filters,
                                             kernel_size=8,
                                             strides=2,
                                             padding='same',
                                             activation=tf.nn.relu)
    self.layers['conv_2'] = tf.layers.Conv2D(filters=self.nb_filters * 2,
                                             kernel_size=6,
                                             strides=2,
                                             padding='valid',
                                             activation=tf.nn.relu)
    self.layers['conv_3'] = tf.layers.Conv2D(filters=self.nb_filters * 2,
                                             kernel_size=5,
                                             strides=1,
                                             padding='valid',
                                             activation=tf.nn.relu)
    self.layers['flatten'] = tf.layers.Flatten()
    self.layers['logits'] = tf.layers.Dense(self.nb_classes,
                                            activation=None)

    # Dummy fprop to activate the network (forces variable creation).
    self.fprop(dummy_input)
def __init__(self, scope='simple', nb_classes=2, **kwargs):
    """Minimal model constructor: delegate straight to Model.

    :param scope: variable-scope name (default 'simple').
    :param nb_classes: number of output classes (default 2).
    """
    # Unused keyword args are dropped before locals() is captured,
    # so they do not leak into Model's recorded hparams.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
def __init__(self, scope, nb_classes=1000, **kwargs):
    """Minimal model constructor: delegate straight to Model.

    :param scope: variable-scope name.
    :param nb_classes: number of output classes (default 1000,
        presumably ImageNet -- confirm against callers).
    """
    # Unused keyword args are dropped before locals() is captured,
    # so they do not leak into Model's recorded hparams.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
def __init__(self, scope='dummy_model', nb_classes=10, **kwargs):
    """Minimal model constructor: delegate straight to Model.

    :param scope: variable-scope name (default 'dummy_model').
    :param nb_classes: number of output classes (default 10).
    """
    # Unused keyword args are dropped before locals() is captured,
    # so they do not leak into Model's recorded hparams.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
def __init__(self, scope, nb_classes, nb_filters=200, **kwargs):
    """Initialize the model wrapper.

    :param scope: variable-scope name forwarded to Model.
    :param nb_classes: number of output classes.
    :param nb_filters: number of hidden filters (default 200).
    """
    # Extra keyword arguments are accepted for call-site compatibility
    # but deliberately discarded before locals() is snapshotted.
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())
    self.nb_filters = nb_filters