Example #1
def build_graph(cfg):
    class _Dummy:
        pass

    env = _Dummy()

    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    m = CharLSTM(cfg)
    env.ybar = m.predict(env.x, env.training)
    env.saver = tf.train.Saver()
    env = build_metric(env, cfg)

    with tf.variable_scope('deepfool'):
        env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
        xadv = deepfool(m,
                        env.x,
                        epochs=env.adv_epochs,
                        eps=env.adv_eps,
                        batch=True,
                        clip_min=-10,
                        clip_max=10)
        # Map the adversarial embeddings back to discrete token space.
        env.xadv = m.reverse_embedding(xadv)
    return env
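A hedged sketch of how this graph might be driven once built; `cfg`, `X_batch`, and the checkpoint path are assumptions, and the attack parameters are illustrative:

env = build_graph(cfg)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    env.saver.restore(sess, 'model/charlstm')  # hypothetical checkpoint path

    # Feed one batch of token ids plus the attack hyperparameters.
    X_adv = sess.run(env.xadv, feed_dict={env.x: X_batch,
                                          env.adv_epochs: 10,
                                          env.adv_eps: 0.01})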
Example #2
def build_graph(cfg):
    class _Dummy:
        pass

    env = _Dummy()

    env.x = tf.placeholder(tf.int32, [None, cfg.seqlen + 1], 'x')
    env.y = tf.placeholder(tf.int32, [None, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    m = WordCNN(cfg)
    env.ybar = m.predict(env.x, env.training)
    env.model = m

    # We do not save the embedding here, since the embedding is not trained.
    env.saver = tf.train.Saver(var_list=m.varlist)

    env = build_metric(env, cfg)

    with tf.variable_scope('deepfool'):
        env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
        env.xadv = deepfool(m,
                            env.x,
                            epochs=env.adv_epochs,
                            eps=env.adv_eps,
                            batch=True,
                            clip_min=-10,
                            clip_max=10)
    return env
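Because `var_list=m.varlist` keeps the frozen embedding out of the checkpoint, restoring this graph takes two steps; a minimal sketch, assuming the model exposes its embedding variable as `env.model.embedding` and the pretrained vectors as `embedding_matrix` (both names are hypothetical):

env = build_graph(cfg)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
env.saver.restore(sess, 'model/wordcnn')                 # trained weights only
sess.run(env.model.embedding.assign(embedding_matrix))   # hypothetical embedding reload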
Example #3
    with tf.variable_scope('train_op'):
        # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
    env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
    env.adv_y = tf.placeholder(tf.int32, (), name='adv_y')

    env.x_fgsm = fgm(model, env.x, epochs=env.adv_epochs, eps=env.adv_eps)
    env.x_deepfool = deepfool(model, env.x, epochs=env.adv_epochs, batch=True)
    env.x_jsma = jsma(model,
                      env.x,
                      env.adv_y,
                      eps=env.adv_eps,
                      epochs=env.adv_epochs)

print('\nInitializing graph')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())


def evaluate(sess, env, X_data, y_data, batch_size=128):
    """
    Evaluate TF model by running env.loss and env.acc.
    """
Example #4
    with tf.variable_scope('train_op'):
        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
    env.adv_D = tf.placeholder(tf.float32, (), name='adv_D')
    env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
    env.adv_y = tf.placeholder(tf.int32, (), name='adv_y')

    env.x_fgsm = fgm(model, env.x, epochs=env.adv_epochs, eps=env.adv_eps)
    env.x_deepfool = deepfool(model,
                              env.x,
                              epochs=env.adv_epochs,
                              batch=True,
                              noise=True,
                              D=env.adv_D)
    env.x_jsma = jsma(model,
                      env.x,
                      env.adv_y,
                      eps=env.adv_eps,
                      epochs=env.adv_epochs)

print('\nInitializing graph')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
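This variant calls deepfool with `noise=True` and a noise scale `D`; a usage sketch, where `X_batch` and the value fed for `adv_D` are illustrative assumptions:

X_deepfool = sess.run(env.x_deepfool,
                      feed_dict={env.x: X_batch,
                                 env.adv_epochs: 8,
                                 env.adv_D: 1.0})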

Example #5

        env.acc = tf.reduce_mean(tf.cast(count, tf.float32), name='acc')

    with tf.variable_scope('loss'):
        xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
                                                       logits=logits)
        env.loss = tf.reduce_mean(xent, name='loss')

    with tf.variable_scope('train_op'):
        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
    env.xadv = deepfool(model, env.x, epochs=env.adv_epochs)

print('\nInitializing graph')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())


def evaluate(sess, env, X_data, y_data, batch_size=128):
    """
    Evaluate TF model by running env.loss and env.acc.
    """
    print('\nEvaluating')

    n_sample = X_data.shape[0]
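    # NOTE: the original snippet is truncated here; what follows is a hedged
    # reconstruction of the usual mini-batch loop over env.loss and env.acc
    # (assumes numpy imported as np).
    n_batch = int(np.ceil(n_sample / batch_size))
    loss, acc = 0.0, 0.0
    for batch in range(n_batch):
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        cnt = end - start
        batch_loss, batch_acc = sess.run(
            [env.loss, env.acc],
            feed_dict={env.x: X_data[start:end],
                       env.y: y_data[start:end]})
        loss += batch_loss * cnt
        acc += batch_acc * cnt
    loss /= n_sample
    acc /= n_sample
    print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))
    return loss, acc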
Example #6

    def setup(self,
              lp,
              eps=None,
              abstain_cost=0,
              deepfool=False,
              pgd_variant=None,
              decay=False,
              variant='prodlog',
              adaptive=False,
              gamma=None,
              only_correct=False,
              eip=None,
              stop=False,
              randomize=True,
              iters=attacks.PGD_ITERS):
        if eps is None:
            eps = attacks.EPS[self.dataset][lp]

        if deepfool:
            adv_inputs = attacks.deepfool(
                self.model,
                self.normalizer,
                self.inputs,
                self.labels,
                self.dataset,
                lp,
                eps,
                abstain=True,
                num_targets=1,
                iters=iters,
            )
        else:
            if pgd_variant is None or isinstance(pgd_variant, str):
                pgd_variants = [pgd_variant]
            elif randomize:
                pgd_variants = [random.choice(pgd_variant)]
            else:
                pgd_variants = pgd_variant

            adv_inputs = []
            batch_size = len(self.labels)
            for variant_index, pgd_variant in enumerate(pgd_variants):
                attack_kwargs = {'iters': iters}

                if eip is not None:
                    eip_variant, prop_iters, eip_prop = eip
                    if (pgd_variant == eip_variant
                            and random.random() < eip_prop):
                        attack_kwargs['iters'] = prop_iters

                attack: Callable[..., torch.Tensor]
                if pgd_variant is None:
                    attack = attacks.pgd
                    if 'iters' not in attack_kwargs:
                        attack_kwargs.update({
                            'eps_iter': eps / 2,
                            'eps_decay': 0.8 if attack_kwargs['iters'] >= 10 else 0.6,
                        })
                else:
                    attack = attacks.pgd_abstain
                    attack_kwargs.update({
                        'variant': pgd_variant,
                        'stop': stop,
                    })

                if pgd_variant in ['sum', 'abstain']:
                    attack_kwargs.update({
                        'inverse_eps_decay': attack_kwargs['iters'] == iters,
                        'early_stop_thresh': 0.1,
                        'iters': 100,
                    })

                variant_slice = slice(
                    (variant_index * batch_size) // len(pgd_variants),
                    ((variant_index + 1) * batch_size) // len(pgd_variants),
                )

                adv_inputs.append(
                    attack(
                        self.model,
                        self.normalizer,
                        self.inputs[variant_slice],
                        self.labels[variant_slice],
                        self.dataset,
                        lp=lp,
                        eps=eps,
                        abstain=True,
                        **attack_kwargs,
                    ))
            adv_inputs = torch.cat(adv_inputs)

        self.add_input_to_compute(adv_inputs)
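The slice arithmetic in `variant_slice` splits one batch evenly across the chosen PGD variants; a tiny standalone check of what it computes, with illustrative numbers:

batch_size, n_variants = 10, 3
for i in range(n_variants):
    s = slice((i * batch_size) // n_variants,
              ((i + 1) * batch_size) // n_variants)
    print(i, s)
# 0 slice(0, 3)
# 1 slice(3, 6)
# 2 slice(6, 10)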
Example #7

    with tf.name_scope('loss'):
        xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
                                                       logits=logits)
        env.loss = tf.reduce_mean(xent, name='loss')

    with tf.name_scope('train_op'):
        env.train_op = tf.train.AdamOptimizer().minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.adv_prob = tf.placeholder(tf.float32, ())
    env.adv_epochs = tf.placeholder(tf.int32, ())
    env.x_adv, env.noise = deepfool(model,
                                    env.x,
                                    noise=True,
                                    ord_=3.4,
                                    epochs=env.adv_epochs,
                                    min_prob=env.adv_prob)

print('\nInitializing graph')
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
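Since this deepfool call returns both the adversarial input and the raw noise, the two tensors can be fetched in a single run; a hedged sketch with illustrative values (`X_batch` is an assumption):

X_adv, noise = sess.run([env.x_adv, env.noise],
                        feed_dict={env.x: X_batch,
                                   env.adv_epochs: 5,
                                   env.adv_prob: 0.8})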

train(sess,
      env,
      X_train,
      y_train,
      X_valid,
      y_valid,
      load=True,

Example #8

        env.acc = tf.reduce_mean(tf.cast(count, tf.float32), name='acc')

    with tf.variable_scope('loss'):
        xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
                                                       logits=logits)
        env.loss = tf.reduce_mean(xent, name='loss')

    with tf.variable_scope('train_op'):
        optimizer = tf.train.AdamOptimizer()
        env.train_op = optimizer.minimize(env.loss)

    env.saver = tf.train.Saver()

with tf.variable_scope('model', reuse=True):
    env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
    env.noise = deepfool(model, env.x, epochs=env.adv_epochs, batch=True,
                         noise=True)

print('\nInitializing graph')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
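Here deepfool returns only the perturbation (`noise=True`), so applying it is a separate addition; a sketch under the assumptions that inputs live in [0, 1] and numpy is imported as `np`:

noise = sess.run(env.noise, feed_dict={env.x: X_test,
                                       env.adv_epochs: 3})
X_adv = np.clip(X_test + noise, 0.0, 1.0)  # clip range is an assumption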


def evaluate(sess, env, X_data, y_data, batch_size=128):
    """
    Evaluate TF model by running env.loss and env.acc.
    """
    print('\nEvaluating')

    n_sample = X_data.shape[0]