# Example 1
def build_graph(cfg):
    """Assemble the TF graph for the character-level LSTM classifier,
    its metrics, and an FGM-based adversarial-example generator.

    Returns a plain namespace object carrying the placeholders, model,
    saver, metric tensors and the adversarial input tensor ``xadv``.
    """
    class _Env:
        pass

    env = _Env()

    # Input placeholders: fixed batch of character-id sequences plus labels.
    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    model = CharLSTM(cfg)
    env.model = model
    env.ybar = model.predict(env.x, env.training)
    env.saver = tf.train.Saver()
    env = build_metric(env, cfg)

    # NOTE(review): scope is named 'deepfool' but the attack used is fgm —
    # kept as-is because renaming would change variable/op names.
    with tf.variable_scope('deepfool'):
        env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
        perturbed = fgm(model, env.x,
                        epochs=env.adv_epochs,
                        eps=env.adv_eps,
                        sign=cfg.sign,
                        clip_min=-10,
                        clip_max=10)
        # Map the perturbed embedding back to discrete inputs.
        env.xadv = model.reverse_embedding(perturbed)
    return env
# Example 2
def build_graph(cfg):
    """Assemble the TF graph for the word-level CNN classifier, its
    metrics, and an FGM adversarial-example generator.

    Returns a plain namespace object carrying the placeholders, model,
    saver, metric tensors and the adversarial input tensor ``xadv``.
    """
    class _Env:
        pass

    env = _Env()

    # Variable-size batch of word-id sequences (seqlen + 1 tokens) and labels.
    env.x = tf.placeholder(tf.int32, [None, cfg.seqlen + 1], 'x')
    env.y = tf.placeholder(tf.int32, [None, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    model = WordCNN(cfg)
    env.ybar = model.predict(env.x, env.training)
    env.model = model

    # The embedding is not trained, so it is excluded from the checkpoint.
    env.saver = tf.train.Saver(var_list=model.varlist)

    env = build_metric(env, cfg)

    with tf.variable_scope('adv'):
        env.adv_epochs = tf.placeholder(tf.int32, (), name='epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='eps')
        env.xadv = fgm(model, env.x,
                       epochs=env.adv_epochs,
                       eps=env.adv_eps,
                       sign=cfg.sign,
                       clip_min=-10,
                       clip_max=10)
    return env
    def __init__(self, input_dims, hidden_neurons, eps=0.1, pfp=0.5):
        """Build a simple MLP binary classifier graph plus an FGM attack
        sub-graph, and start an interactive TF session.

        Args:
            input_dims: number of input features per example.
            hidden_neurons: size(s) of the hidden layer(s) passed to the
                MLP builder.
            eps: default perturbation magnitude (stored; the attack itself
                reads the ``fgm_eps`` placeholder at run time).
            pfp: stored hyper-parameter; not used in this constructor.
                # NOTE(review): presumably consumed elsewhere — verify.
        """
        self.input_dims = input_dims
        self.hidden_neurons = hidden_neurons
        self.eps = eps
        self.pfp = pfp
        # Bookkeeping containers filled in later by other methods.
        self.new = []
        self.data_t = None
        # Start from a clean graph so repeated construction does not
        # accumulate duplicate variables.
        tf.reset_default_graph()

        with tf.variable_scope('model'):
            self.x = tf.placeholder(tf.float32, (None, input_dims), name='x')
            self.y = tf.placeholder(tf.float32, (None, 1), name='y')
            # Sigmoid-probability output of the MLP.
            self.y_pred = self.simple_mlp(self.x)

            # calculate the accuracy
            with tf.variable_scope('acc'):
                # Threshold probabilities at 0.5 to get hard 0/1 predictions.
                self.y_ = tf.where(tf.greater(self.y_pred, 0.5),
                                   tf.ones_like(self.y_pred),
                                   tf.zeros_like(self.y_pred))
                count = tf.equal(self.y, self.y_)
                self.acc = tf.reduce_mean(tf.cast(count, tf.float32),
                                          name='acc')

            with tf.variable_scope('loss'):
                # Binary cross-entropy; clip_by_value guards log(0).
                self.entropy_loss = -tf.reduce_mean(
                    self.y *
                    tf.log(tf.clip_by_value(self.y_pred, 1e-10, 1.0)) +
                    (1 - self.y) *
                    tf.log(tf.clip_by_value(1 - self.y_pred, 1e-10, 1.0)))

                # Sum of all regularization terms registered by the layers.
                self.reg_loss = tf.add_n(
                    tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
                self.loss = self.entropy_loss + self.reg_loss

            with tf.variable_scope('train_op'):
                optimizer = tf.train.AdamOptimizer(0.001)
                self.train_op = optimizer.minimize(self.loss)

        # reuse=True: the attack shares the trained model's variables.
        with tf.variable_scope('model', reuse=True):
            self.fgm_eps = tf.placeholder(tf.float32, (), name='fgm_eps')
            self.x_fgm = fgm(self.simple_mlp,
                             self.x,
                             epochs=1,
                             eps=self.fgm_eps)

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())
# Example 4
        #tf.add_to_collection('loss', weight_decay)

    with tf.variable_scope('train_op'):
        #optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)

        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

# Build three attack sub-graphs sharing the trained model's variables
# (reuse=True reopens the 'model' scope without creating new variables).
with tf.variable_scope('model', reuse=True):
    env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
    env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
    # Target class for the targeted JSMA attack.
    env.adv_y = tf.placeholder(tf.int32, (), name='adv_y')

    env.x_fgsm = fgm(model, env.x, epochs=env.adv_epochs, eps=env.adv_eps)
    env.x_deepfool = deepfool(model, env.x, epochs=env.adv_epochs, batch=True)
    env.x_jsma = jsma(model,
                      env.x,
                      env.adv_y,
                      eps=env.adv_eps,
                      epochs=env.adv_epochs)

print('\nInitializing graph')

# Interactive session so later cells/calls can run ops without an
# explicit session argument.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())


def evaluate(sess, env, X_data, y_data, batch_size=128):
# Example 5
def get_adv_dataset(sess, logits, x, y, x_test, y_test):
    """Generate untargeted FGM (L-inf, eps=0.2) adversarial examples for
    the whole test set and return them as a single array.
    """
    adv_op = attacks.fgm(x, logits, eps=0.2, ord=np.inf,
                         clip_min=0, clip_max=1, targeted=False)
    feed = {x: x_test, y: y_test}
    return sess.run(adv_op, feed_dict=feed)
# Example 6
                                                       logits=logits)
        env.loss = tf.reduce_mean(xent, name='loss')

    with tf.variable_scope('train_op'):
        # optimizer = tf.train.AdamOptimizer()
        optimizer = tf.train.GradientDescentOptimizer(0.01)
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

# Build the FGSM attack sub-graph on top of the trained model
# (reuse=True reopens the 'model' scope without creating new variables).
with tf.variable_scope('model', reuse=True):
    env.fgsm_eps = tf.placeholder(tf.float32, (), name='fgsm_eps')
    env.fgsm_epochs = tf.placeholder(tf.int32, (), name='fgsm_epochs')
    # The attack re-invokes the network, so it needs the same arg_scope
    # the model was originally built under.
    with slim.arg_scope(inception_resnet_v2_arg_scope()):
        env.x_fgsm = fgm(cifarnet,
                         env.x,
                         epochs=env.fgsm_epochs,
                         eps=env.fgsm_eps)

print('\nInitializing graph')

# Interactive session so later calls can run ops without an explicit
# session argument.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())


def evaluate(sess, env, X_data, y_data, batch_size=128):
    """
    Evaluate TF model by running env.loss and env.acc.
    """
    print('\nEvaluating')