Example #1
    def __init__(self,
                 restore=None,
                 session=None,
                 use_log=False,
                 use_brelu=False):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu
        else:
            activation = 'relu'
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model.add(Activation(activation))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
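A brief usage sketch for the wrapper above (the class name MNISTModel and the weights path are assumptions, not part of the original example): each entry in layer_outputs is a K.function that maps the network input to the output of one Conv2D or Dense layer.

import numpy as np

m = MNISTModel(restore="models/mnist.h5")             # hypothetical class name and path
batch = np.random.rand(4, 28, 28, 1).astype("float32")
# Calling a backend function returns a one-element list of numpy arrays.
first_conv = m.layer_outputs[0]([batch])[0]
print(first_conv.shape)                               # (4, 26, 26, 32) for the first Conv2D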
Example #2
    def __init__(self, model=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        if not use_softmax:
            model.layers.pop()

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #3
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        model.add(
            Conv2D(6, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   input_shape=(28, 28, 1),
                   name='l1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l2'))
        model.add(
            Conv2D(16, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='l3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l4'))
        model.add(Flatten())
        model.add(
            Dense(120,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l5'))
        model.add(
            Dense(84,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l6'))
        model.add(
            Dense(10,
                  activation='softmax',
                  kernel_initializer='he_normal',
                  name='l7'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #4
    def __init__(self,
                 restore=None,
                 session=None,
                 use_softmax=False,
                 use_brelu=False,
                 activation="relu",
                 de=False,
                 attack='fgsm',
                 epoch=49):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside CIFARModel: activation = {}".format(activation))

        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.float32, shape=(None, 10))

        input_shape, nb_classes = get_shape('cifar10')

        model = model_dict[restore](input_shape, nb_classes, False)
        preds = model(x)
        if de:
            model_path = '../../de_models/' + attack + '/' + 'cifar10_' + restore + '/' + str(
                epoch) + '/' + restore + '.model'
        else:
            model_path = '/mnt/dyz/models/' + 'cifar10_' + restore + '/' + str(
                epoch) + '/' + restore + '.model'
        saver = tf.train.Saver()
        saver.restore(session, model_path)
        print("load model successfully")
        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #5
    def __init__(self, restore=None, session=None, use_softmax=False, use_brelu=False, activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)

        if use_brelu:
            activation = bounded_relu

        print("inside CIFARModel: activation = {}".format(activation))

        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #6
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(
            Conv2D(32,
                   kernel_size,
                   activation='relu',
                   padding='same',
                   name='block1_conv1',
                   input_shape=(28, 28, 1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(
            Conv2D(64,
                   kernel_size,
                   activation='relu',
                   padding='same',
                   name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #7
    def __init__(self, model, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        # output log probability, used for black-box attack
        if not use_softmax:
            model.layers.pop()

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
Example #8
    def __init__(self,
                 params,
                 restore=None,
                 session=None,
                 use_log=False,
                 image_size=28,
                 image_channel=1,
                 activation='relu'):

        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
        # list of all hidden units weights
        self.U = []
        for param in params:
            # add each dense layer, and save a reference to list U
            self.U.append(Dense(param))
            model.add(self.U[-1])
            # ReLU activation
            # model.add(Activation(activation))
            if activation == "arctan":
                model.add(Lambda(lambda x: tf.atan(x)))
            else:
                model.add(Activation(activation))
        self.W = Dense(10)
        model.add(self.W)
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #9
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28*28]
        model = Sequential()
        model.add(Dense(512, activation='relu', input_shape=(28*28,), name='dense_1'))
        model.add(Dropout(0.2, name='d1'))
        model.add(Dense(512, activation='relu', name='dense_2'))
        model.add(Dropout(0.2, name='d2'))
        model.add(Dense(10, activation='softmax', name='dense_3'))
        if restore:
            model.load_weights(restore, by_name=True)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #10
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(32, 32, 3)))
        model.add(Dense(1024))
        model.add(Activation('softplus'))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #11
    s1 = style_loss(style_features, gen_img_features)
    # We need to divide the loss by the number of layers that we take into account
    loss += (STYLE_WEIGHT / len(feature_layer_names)) * s1
loss += TV_WEIGHT * total_variation_loss(gen_img)

# Calculate gradients
grads = K.gradients(loss, gen_img)

outputs = [loss]
if isinstance(grads, (list, tuple)):
    outputs += grads
else:
    outputs.append(grads)

# Define a Keras function
f_output = K.function([gen_img], outputs)


def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_h, img_w))
    else:
        x = x.reshape((1, img_h, img_w, 3))
    # Update the loss and the gradients
    outs = f_output([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
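A hedged sketch of how eval_loss_and_grads is usually consumed: scipy's L-BFGS optimizer wants separate loss and gradient callables, so the two return values are cached in a small Evaluator object (img_h, img_w and the starting image are assumed to be defined in the surrounding script):

from scipy.optimize import fmin_l_bfgs_b
import numpy as np

class Evaluator:
    # Caches the gradients computed alongside the loss so that loss() and
    # grads() can be handed to fmin_l_bfgs_b as independent callables.
    def __init__(self):
        self.grad_values = None

    def loss(self, x):
        loss_value, self.grad_values = eval_loss_and_grads(x)
        return loss_value

    def grads(self, x):
        return np.copy(self.grad_values)

evaluator = Evaluator()
x = np.random.uniform(0, 255, (1, img_h, img_w, 3)).flatten()  # assumed starting image
for i in range(10):
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20)
    print("Iteration {}: loss = {}".format(i, min_val))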
Example #12
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10
        self.shape = [None, 32, 32, self.num_channels]
        model = Sequential()
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block1_conv1',
                   input_shape=(32, 32, 3)))  # 1
        model.add(BatchNormalization(name="batch_normalization_1"))  # 2
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block1_conv2'))  # 3
        model.add(BatchNormalization(name="batch_normalization_2"))
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block1_pool'))  # 4
        # Block 2
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block2_conv1'))  # 5
        model.add(BatchNormalization(name="batch_normalization_3"))
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block2_conv2'))  # 6
        model.add(BatchNormalization(name="batch_normalization_4"))  # 7
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block2_pool'))  # 8

        # Block 3
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv1'))  # 9
        model.add(BatchNormalization(name="batch_normalization_5"))  # 10
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv2'))  # 11
        model.add(BatchNormalization(name="batch_normalization_6"))
        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='block3_conv3'))  # 12
        model.add(BatchNormalization(name="batch_normalization_7"))  # 13
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block3_pool'))  # 14
        model.add(Flatten())  # 15
        model.add(Dense(256, activation='relu', name='dense_1'))  # 16
        model.add(BatchNormalization(name="batch_normalization_8"))  # 17
        model.add(Dense(256, activation='relu', name='dense_2'))  # 18
        model.add(BatchNormalization(name="batch_normalization_9"))  # 19
        model.add(Dense(10, activation='softmax', name='dense_3'))  # 20
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #13
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10
        self.shape = [None, 32, 32, self.num_channels]
        model = Sequential()
        weight_decay = 0.0001
        model.add(
            Conv2D(192, (5, 5),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   input_shape=(32, 32, 3),
                   name='l1'))  # x_train.shape[1:]))
        model.add(BatchNormalization(name='l2'))
        model.add(Activation('relu', name='l3'))
        model.add(
            Conv2D(160, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l4'))
        model.add(BatchNormalization(name='l5'))
        model.add(Activation('relu', name='l6'))
        model.add(
            Conv2D(96, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l7'))
        model.add(BatchNormalization(name='l8'))
        model.add(Activation('relu', name='l9'))
        model.add(
            MaxPooling2D(pool_size=(3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='l10'))

        model.add(Dropout(0.3, name='l11'))

        model.add(
            Conv2D(192, (5, 5),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l12'))
        model.add(BatchNormalization(name='l13'))
        model.add(Activation('relu', name='l14'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l15'))
        model.add(BatchNormalization(name='l16'))
        model.add(Activation('relu', name='l17'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l18'))
        model.add(BatchNormalization(name='l20'))
        model.add(Activation('relu', name='l21'))
        model.add(
            MaxPooling2D(pool_size=(3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='l22'))

        model.add(Dropout(0.3, name='l23'))

        model.add(
            Conv2D(192, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l24'))
        model.add(BatchNormalization(name='l25'))
        model.add(Activation('relu', name='l26'))
        model.add(
            Conv2D(192, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l27'))
        model.add(BatchNormalization(name='l28'))
        model.add(Activation('relu', name='l29'))
        model.add(
            Conv2D(10, (1, 1),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(weight_decay),
                   name='l30'))
        model.add(BatchNormalization(name='l31'))
        model.add(Activation('relu', name='l32'))

        model.add(GlobalAveragePooling2D(name='l33'))
        model.add(Activation('softmax', name='l34'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #14
    def __init__(self, restore_dae=None, restore_clf=None, session=None, use_softmax=False, activation="relu"):

        print("inside MNISTModelDAE: activation = {}".format(activation))

        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model1 = Sequential()

        model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

        # Encoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(AveragePooling2D((2, 2), padding="same"))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

        # Decoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(UpSampling2D((2, 2)))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

        model1.add(Lambda(lambda x_: x_ - 0.5))

        model1.load_weights(restore_dae)
        model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')


        model2 = Sequential()

        model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model2.add(Activation(activation))
        model2.add(Conv2D(32, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Flatten())
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(10))
        # output log probability, used for black-box attack
        if use_softmax:
            model2.add(Activation('softmax'))
        if restore_clf:
            model2.load_weights(restore_clf)

        layer_outputs = []
        for layer in model1.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model1.layers[0].input], [layer.output]))
        for layer in model2.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model2.layers[0].input], [layer.output]))

        model = Sequential()
        model.add(model1)
        model.add(model2)
        self.model = model
        self.layer_outputs = layer_outputs
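A short usage sketch for the stacked denoiser-plus-classifier above (the weight paths are hypothetical; the class name MNISTModelDAE comes from the print statement in the constructor):

import numpy as np

dae_clf = MNISTModelDAE(restore_dae="models/mnist_dae.h5",   # hypothetical paths
                        restore_clf="models/mnist_clf.h5")
# Inputs follow the same convention as the Lambda layers: pixel values centered at 0.
x = np.random.rand(2, 28, 28, 1).astype("float32") - 0.5
logits = dae_clf.model.predict(x)
print(logits.shape)  # (2, 10)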