Example #1
def main(_):
    with tf.Session() as sess:
        K.set_session(sess)
        if FLAGS.dataset == 'MNIST':
            data, model = MNIST(), MNISTModel("models/mnist", sess)
        elif FLAGS.dataset == 'Cifar':
            data, model = CIFAR(), CIFARModel("models/cifar", sess)


        def _model_fn(x, logits=False):
            ybar, logits_ = model.predict(x)
            if logits:
                return ybar, logits_
            return ybar

        
        if FLAGS.dataset == 'MNIST':
            x_adv = fgsm(_model_fn, x, epochs=9, eps=0.02)
        elif FLAGS.dataset == 'Cifar':
            x_adv = fgsm(_model_fn, x, epochs=4, eps=0.01)

        X_adv_test = attack(x_adv, data.test_data, data.test_labels, sess)
        X_adv_train = attack(x_adv, data.train_data, data.train_labels, sess)

        np.save('adversarial_outputs/fgsm_train_' + FLAGS.dataset.lower() + '.npy', X_adv_train)
        np.save('adversarial_outputs/fgsm_test_' + FLAGS.dataset.lower() + '.npy', X_adv_test)
        print("Legit/Adversarial training set")
        model.evaluate(data.train_data, data.train_labels)
        model.evaluate(X_adv_train, data.train_labels)
        
        print("Legit/Adversarial test set")
        model.evaluate(data.test_data, data.test_labels)
        model.evaluate(X_adv_test, data.test_labels)
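The attack helper called above is not part of this excerpt. Judging from the batched generation loops in the later examples, it plausibly just materializes x_adv over the data in batches; a sketch, where the batch size and the input placeholder x (taken from the surrounding script) are assumptions:

import numpy as np

def attack(x_adv, X_data, y_data, sess, batch_size=128):
    # Run the adversarial graph batch by batch; y_data is accepted to match
    # the call sites above, but an untargeted FGSM run only needs the inputs.
    nb_sample = X_data.shape[0]
    nb_batch = int(np.ceil(nb_sample / batch_size))
    X_out = np.empty(X_data.shape)
    for batch in range(nb_batch):
        start = batch * batch_size
        end = min(nb_sample, start + batch_size)
        X_out[start:end] = sess.run(x_adv, feed_dict={x: X_data[start:end]})
    return X_out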
Example #2
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(X_train, y_train, nb_epoch=nb_epoch)

    os.makedirs('model', exist_ok=True)
    model.save('model/ex_00.h5')
else:
    model = load_model('model/ex_00.h5')


x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
x_adv = fgsm(model, x, y, nb_epoch=4, eps=0.1)
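# The y placeholder is passed in here so the attack can differentiate the
# cross-entropy at the true labels: x_adv = x + eps * sign(grad_x loss).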


print('Testing...')
score = model.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))


nb_sample = X_test.shape[0]
nb_batch = int(np.ceil(nb_sample/batch_size))
X_adv = np.empty(X_test.shape)
for batch in range(nb_batch):
    print('batch {0}/{1}'.format(batch+1, nb_batch), end='\r')
    start = batch * batch_size
    end = min(nb_sample, start+batch_size)
    tmp = sess.run(x_adv, feed_dict={x: X_test[start:end],
                                     y: y_test[start:end]})
    X_adv[start:end] = tmp
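None of these snippets include the fgsm helper itself. A minimal TF1-style sketch of an iterated fast-gradient-sign op in the spirit of the calls above; the signature, and the assumption that model(x) returns logits, are mine, not the library's:

import tensorflow as tf

def fgsm_sketch(model, x, eps=0.01, nb_epoch=1, clip_min=0., clip_max=1.):
    # Iterated FGSM: each iteration nudges the input by eps in the direction
    # that increases the loss, i.e. x <- clip(x + eps * sign(grad_x loss)).
    x_adv = tf.identity(x)
    for _ in range(nb_epoch):
        logits = model(x_adv)  # assumed to return logits
        # Untargeted attack: use the model's current prediction as the label.
        y = tf.stop_gradient(tf.one_hot(tf.argmax(logits, axis=1),
                                        tf.shape(logits)[1]))
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
        grad, = tf.gradients(loss, x_adv)
        x_adv = tf.clip_by_value(x_adv + eps * tf.sign(grad),
                                 clip_min, clip_max)
    return x_adv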
Example #3
                   metrics=['accuracy'])

    earlystopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    model0.fit(X_train,
               y_train,
               nb_epoch=100,
               validation_split=0.1,
               callbacks=[earlystopping])

    print('\nSaving model0')
    os.makedirs('model', exist_ok=True)
    model0.save('model/table_1_svhn_model0.h5')

x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_chan))
eps = tf.placeholder(tf.float32, ())
x_adv = fgsm(model0, x, nb_epoch=9, eps=eps, clip_min=-1., clip_max=1.)
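# eps is a placeholder so the same attack graph can be evaluated at several
# perturbation strengths, fed per run through feed_dict.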

print('\nTesting against clean test data')
score = model0.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))

EPS = 0.01

if False:
    print('\nLoading adversarial dataset')
    db = np.load('data/table_1_svhn_{0:.4f}.npz'.format(EPS))
    X_train_adv = db['X_train_adv']
    X_test_adv = db['X_test_adv']
else:
    print('\nBuilding X_train_adv')
    nb_sample = X_train.shape[0]
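Example #3 is cut off at this point; a plausible continuation, mirroring the batched loops in the other snippets and feeding the eps placeholder defined above (the batch size is an assumption):

    batch_size = 128
    nb_batch = int(np.ceil(nb_sample / batch_size))
    X_train_adv = np.empty(X_train.shape)
    for batch in range(nb_batch):
        start = batch * batch_size
        end = min(nb_sample, start + batch_size)
        X_train_adv[start:end] = sess.run(
            x_adv, feed_dict={x: X_train[start:end], eps: EPS})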
Example #4
                   metrics=['accuracy'])

    earlystopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    model0.fit(X_train,
               y_train,
               nb_epoch=100,
               validation_split=0.1,
               callbacks=[earlystopping])

    print('\nSaving model0')
    os.makedirs('model', exist_ok=True)
    model0.save('model/table_2_model0.h5')

x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_chan))
eps = tf.placeholder(tf.float32, ())
x_adv = fgsm(model0, x, nb_epoch=9, eps=eps)

print('\nTesting against clean test data')
score = model0.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))

if False:
    for EPS in [0.01, 0.03, 0.1, 0.3]:
        print('\nBuilding X_train_adv with eps={0:.2f}'.format(EPS))
        nb_sample = X_train.shape[0]
        batch_size = 128
        nb_batch = int(np.ceil(nb_sample / batch_size))
        X_train_adv = np.empty(X_train.shape)
        for batch in range(nb_batch):
            print(' batch {0}/{1}'.format(batch + 1, nb_batch), end='\r')
            start = batch * batch_size
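Example #4 stops mid-loop; once X_train_adv (and a matching X_test_adv) are filled in, the per-eps arrays would presumably be saved in the same .npz layout that Example #3 loads. A hypothetical counterpart (the table_2 path is a guess):

        os.makedirs('data', exist_ok=True)
        np.savez('data/table_2_{0:.4f}.npz'.format(EPS),
                 X_train_adv=X_train_adv, X_test_adv=X_test_adv)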
Example #5
    os.makedirs('model', exist_ok=True)
    model.save('model/ex_00.h5')

x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_chas))
y = tf.placeholder(tf.float32, (None, nb_classes))


def _model_fn(x, logits=False):
    ybar = model(x)
    # The model ends in a softmax Activation, so the softmax op's input
    # is the pre-activation logits tensor.
    logits_, = ybar.op.inputs
    if logits:
        return ybar, logits_
    return ybar


x_adv = fgsm(_model_fn, x, epochs=9, eps=0.02)

print('\nTest against clean data')
score = model.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))

nb_sample = X_test.shape[0]
batch_size = 128
nb_batch = int(np.ceil(nb_sample / batch_size))
X_adv = np.empty(X_test.shape)
for batch in range(nb_batch):
    print('batch {0}/{1}'.format(batch + 1, nb_batch), end='\r')
    start = batch * batch_size
    end = min(nb_sample, start + batch_size)
    tmp = sess.run(x_adv,
                   feed_dict={x: X_test[start:end]})
    X_adv[start:end] = tmp
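With X_adv filled in, the natural follow-up mirrors the clean-data check above (assumed; the snippet ends before it):

print('\nTest against adversarial data')
score = model.evaluate(X_adv, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))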
Example #6
x = tf.placeholder(tf.float32, (None, img_rows, img_cols))
y = tf.placeholder(tf.float32, (None, nb_classes))
eps = tf.placeholder(tf.float32, ())
sess.run(tf.global_variables_initializer())


def _model_fn(x, logits=False):
    ybar = model(x)
    # Here the softmax op has two inputs; the first is the logits tensor.
    logits_, _ = ybar.op.inputs
    if logits:
        return ybar, logits_
    return ybar


x_adv = fgsm(_model_fn, x, epochs=4, eps=0.01)

max_X = np.max(X_test.flatten())
min_X = np.min(X_test.flatten())
X_test_scaled = (X_test - min_X) / (max_X - min_X)

nb_sample = X_test_scaled.shape[0]
nb_batch = int(np.ceil(nb_sample / batch_size_adv))
X_adv = np.empty(X_test_scaled.shape)
for batch in range(nb_batch):
    print('batch {0}/{1}'.format(batch + 1, nb_batch), end='\r')
    start = batch * batch_size_adv
    end = min(nb_sample, start + batch_size_adv)
    tmp = sess.run(x_adv,
                   feed_dict={
                       x: X_test_scaled[start:end]})
    X_adv[start:end] = tmp
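Since the attack runs on X_test_scaled in [0, 1], mapping the adversarial images back to the original input range is just the inverse of the scaling above (an assumed follow-up step):

X_adv_orig = X_adv * (max_X - min_X) + min_X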
Example #7
    earlystopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              validation_split=0.1,
              callbacks=[earlystopping])

    os.makedirs('model', exist_ok=True)
    model.save('model/ex_06.h5')

x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, img_channels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
eps = tf.placeholder(tf.float32, ())
x_adv = fgsm(model, x, y='max', nb_epoch=4, eps=0.01)

print('Testing...')
score = model.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))

if adv_saved:
    with gzip.open('data/ex_06.pkl.gz', 'rb') as r:
        X_adv, y_adv, y_pred, _ = pickle.load(r)
        X_adv, y_adv = np.array(X_adv), np.array(y_adv)
        y_pred = np.array(y_pred)
else:
    print('generating adversarial data')
    nb_sample = X_test.shape[0]
    nb_batch = int(np.ceil(nb_sample / batch_size))
    X_adv = np.empty(X_test.shape)
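The else branch is cut off here; a hypothetical write-side counterpart to the gzip/pickle load above, once X_adv, y_adv, and y_pred have been generated (the fourth tuple slot is discarded on load, so None stands in for it):

    with gzip.open('data/ex_06.pkl.gz', 'wb') as w:
        pickle.dump((X_adv, y_adv, y_pred, None), w)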
Example #8
    ])

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('\nTraining model')
    model.fit(X_train, y_train, nb_epoch=10)

    print('\nSaving model')
    os.makedirs('model', exist_ok=True)
    model.save('model/ex_00.h5')

x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, img_chas))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
x_adv = fgsm(model, x, nb_epoch=12, eps=0.02)

print('\nTest against clean data')
score = model.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))

nb_sample = X_test.shape[0]
batch_size = 128
nb_batch = int(np.ceil(nb_sample / batch_size))
X_adv = np.empty(X_test.shape)
for batch in range(nb_batch):
    print('batch {0}/{1}'.format(batch + 1, nb_batch), end='\r')
    start = batch * batch_size
    end = min(nb_sample, start + batch_size)
    tmp = sess.run(x_adv,
                   feed_dict={x: X_test[start:end]})
    X_adv[start:end] = tmp
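With X_adv built, saving it for later reuse keeps the workflow of Example #1 (the output path here is hypothetical):

os.makedirs('adversarial_outputs', exist_ok=True)
np.save('adversarial_outputs/fgsm_test_ex00.npy', X_adv)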
Example #9
                             training=env.training)

    z = tf.argmax(env.y, axis=1)
    zbar = tf.argmax(env.ybar, axis=1)
    count = tf.cast(tf.equal(z, zbar), tf.float32)
    env.acc = tf.reduce_mean(count, name='acc')

    xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
                                                   logits=logits)
    env.loss = tf.reduce_mean(xent, name='loss')

env.optim = tf.train.AdamOptimizer().minimize(env.loss)

# Note the reuse=True flag
with tf.variable_scope('model', reuse=True):
    env.x_adv = fgsm(model, env.x, epochs=12, eps=0.02)

# --------------------------------------------------------------------

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())

# --------------------------------------------------------------------

def _evaluate(X_data, y_data, env):
    print('\nEvaluating')
    n_sample = X_data.shape[0]
    batch_size = 128
    n_batch = int(np.ceil(n_sample/batch_size))
    loss, acc = 0, 0
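_evaluate is cut off here; a plausible completion under the env setup above, averaging per-batch loss and accuracy weighted by batch size and assuming env.training is a boolean placeholder:

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        cnt = end - start
        batch_loss, batch_acc = sess.run(
            [env.loss, env.acc],
            feed_dict={env.x: X_data[start:end],
                       env.y: y_data[start:end],
                       env.training: False})
        loss += batch_loss * cnt
        acc += batch_acc * cnt
    loss /= n_sample
    acc /= n_sample
    print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))
    return loss, acc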