import numpy as np
import tensorflow as tf


def aiTest(images, shape):
    """

    :param images: ndarray
    :param shape: tuple, like (1000, 28, 28, 1)
    :return:
    """
    x_origin = np.reshape(images, (-1, ) + shape[1:])
    x_origin = x_origin.astype(np.float32) / 255

    print('Constructing graph')
    env = Dummy()
    model = lenet5_tensorflow

    with tf.variable_scope('model'):
        env.x = tf.placeholder(tf.float32, (None, ) + shape[1:], name='x')
        env.y = tf.placeholder(tf.float32, (None, N_CLASSES), name='y')
        env.training = tf.placeholder_with_default(False, (), name='mode')
        env.ybar, logits = model(env.x, logits=True, training=env.training)

        with tf.variable_scope('acc'):
            count = tf.equal(tf.argmax(env.y, axis=1),
                             tf.argmax(env.ybar, axis=1))
            env.acc = tf.reduce_mean(tf.cast(count, tf.float32), name='acc')

        with tf.variable_scope('loss'):
            xent = tf.nn.softmax_cross_entropy_with_logits_v2(labels=env.y,
                                                              logits=logits)
            env.loss = tf.reduce_mean(xent, name='loss')

        with tf.variable_scope('train_op'):
            optimizer = tf.train.AdamOptimizer()
            env.train_op = optimizer.minimize(env.loss)

        env.saver = tf.train.Saver()

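    # Rebuild the model under the same scope with reuse=True so the attack
    # ops below share the trained weights.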
    with tf.variable_scope('model', reuse=True):
        env.fgsm_eps = tf.placeholder(tf.float32, (), name='fgsm_eps')
        env.fgsm_epochs = tf.placeholder(tf.int32, (), name='fgsm_epochs')
        env.x_fgsm = fgm(model,
                         env.x,
                         epochs=env.fgsm_epochs,
                         eps=env.fgsm_eps)

    print('Initializing graph')
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print('Loading model')
    env.saver.restore(sess, MODEL_PATH)

    x_adv = make_fgsm(sess,
                      env,
                      x_origin,
                      eps=FGSM_EPSILON,
                      epochs=FGSM_EPOCH,
                      batch_size=FGSM_BATCH_SIZE)

    return x_adv
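
# Note: make_fgsm is not defined in this snippet. A minimal sketch, assuming
# only the env attributes built above (env.x, env.x_fgsm, env.fgsm_eps,
# env.fgsm_epochs), feeding X_data through the attack op batch by batch:
def make_fgsm(sess, env, X_data, eps=0.01, epochs=1, batch_size=128):
    n_sample = X_data.shape[0]
    n_batch = int(np.ceil(n_sample / batch_size))
    X_adv = np.empty_like(X_data)
    for batch in range(n_batch):
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        X_adv[start:end] = sess.run(env.x_fgsm,
                                    feed_dict={env.x: X_data[start:end],
                                               env.fgsm_eps: eps,
                                               env.fgsm_epochs: epochs})
    return X_adv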
Example #2
with tf.variable_scope('model'):
    with tf.variable_scope('loss'):
        xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
                                                       logits=logits)
        env.loss = tf.reduce_mean(xent, name='loss')

    with tf.variable_scope('train_op'):
        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.fgsm_eps = tf.placeholder(tf.float32, (), name='fgsm_eps')
    env.fgsm_epochs = tf.placeholder(tf.int32, (), name='fgsm_epochs')
    env.x_fgsm = fgm(model, env.x, epochs=env.fgsm_epochs, eps=env.fgsm_eps)

print('\nInitializing graph')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())


def evaluate(sess, env, X_data, y_data, batch_size=128):
    """
    Evaluate TF model by running env.loss and env.acc.
    """
    print('Evaluating')

    n_sample = X_data.shape[0]
    n_batch = int(np.ceil(n_sample / batch_size))
    loss, acc = 0.0, 0.0
    for batch in range(n_batch):
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        # Weight each batch by its size so the averages are exact.
        batch_loss, batch_acc = sess.run(
            [env.loss, env.acc],
            feed_dict={env.x: X_data[start:end], env.y: y_data[start:end]})
        loss += batch_loss * (end - start)
        acc += batch_acc * (end - start)
    return loss / n_sample, acc / n_sample
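
# Note: the fgm function used to build env.x_fgsm above is not shown. A
# minimal iterative FGSM sketch, assuming the model callable returns
# (probabilities, logits) when called with logits=True as in Example #1:
def fgm(model, x, eps=0.01, epochs=1, clip_min=0.0, clip_max=1.0):
    xadv = tf.identity(x)

    def _cond(xadv, i):
        return tf.less(i, epochs)

    def _body(xadv, i):
        ybar, logits = model(xadv, logits=True)
        # Untargeted attack: move away from the model's current prediction.
        ydim = ybar.get_shape().as_list()[1]
        target = tf.one_hot(tf.argmax(ybar, axis=1), ydim)
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=target,
                                                          logits=logits)
        grad, = tf.gradients(loss, xadv)
        xadv = tf.clip_by_value(xadv + eps * tf.sign(grad),
                                clip_min, clip_max)
        return xadv, i + 1

    xadv, _ = tf.while_loop(_cond, _body,
                            (xadv, tf.zeros([], dtype=tf.int32)),
                            back_prop=False, name='fgm')
    return xadv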
Example #3
norm_acc = tf.reduce_mean(tf.cast(correct_norm_discr, tf.float32))
adv_acc = tf.reduce_mean(tf.cast(correct_adv_discr, tf.float32))
comb_acc = (norm_acc + adv_acc) / 2

# create a saver
saver = tf.train.Saver()

# initialize graph
init = tf.global_variables_initializer()

# generating adversarial images
fgm_eps = tf.placeholder(tf.float32, ())
fgm_epochs = tf.placeholder(tf.float32, ())
adv_examples = fast_gradient.fgm(x_norm,
                                 final_norm,
                                 sm_norm,
                                 eps=fgm_eps,
                                 epochs=fgm_epochs)

with tf.Session() as sess:
    sess.run(init)

    y_norm_labels = np.squeeze(
        np.stack([[np.array([1, 0])] for _ in range(batch_size)], axis=0))
    y_adv_labels = np.squeeze(
        np.stack([[np.array([0, 1])] for _ in range(batch_size)], axis=0))
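    # The two stacks above are just repeated one-hot rows: [1, 0] marks a
    # clean (norm) input and [0, 1] an adversarial one. An equivalent, more
    # direct form: np.tile([1, 0], (batch_size, 1)) and
    # np.tile([0, 1], (batch_size, 1)).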

    for i in range(epochs):
        print('EPOCH: ' + str(i + 1))
        for j in range(mnist.train.num_examples // batch_size):
            print(j)
Example #4
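# Combined objective: alpha-weighted cross-entropy on the clean (reg) batch,
# plus (1 - alpha)-weighted cross-entropy on the adversarial (adv) batch,
# minus a discriminator cross term scaled by beta.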
loss = (alpha * tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=reg_output,
                                                    labels=reg_y))
        + (1 - alpha) * tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=adv_output,
                                                    labels=adv_y))
        - beta * cross_discr_adv)
sm_norm = tf.nn.softmax(reg_output)
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'reg')
#print(train_vars)
#train_vars.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'adv'))
optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-3).minimize(
    loss, global_step=global_step, var_list=train_vars)
fgm_eps = tf.placeholder(tf.float32, ())
fgm_epochs = tf.placeholder(tf.float32, ())
adv_examples = fast_gradient.fgm(reg_x,
                                 reg_output,
                                 sm_norm,
                                 eps=fgm_eps,
                                 epochs=fgm_epochs)

reg_correct_prediction = tf.equal(reg_y_pred_cls, tf.argmax(reg_y, axis=1))
adv_correct_prediction = tf.equal(adv_y_pred_cls, tf.argmax(adv_y, axis=1))
accuracy_reg = tf.reduce_mean(tf.cast(reg_correct_prediction, tf.float32))
accuracy_adv = tf.reduce_mean(tf.cast(adv_correct_prediction, tf.float32))
accuracy = (accuracy_reg + accuracy_adv) / 2
tf.summary.scalar("Accuracy/class_reg_train", accuracy_reg)
tf.summary.scalar("Accuracy/class_adv_train", accuracy_adv)
tf.summary.scalar("Accuracy/discr_reg_train", discr_accuracy_reg)
tf.summary.scalar("Accuracy/discr_adv_train", discr_accuracy_adv)

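# Merge every scalar summary into a single op for TensorBoard logging and
# create a saver for checkpointing the trained variables.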
merged = tf.summary.merge_all()
saver = tf.train.Saver()