def build_graph(cfg):
    """Build the CharLSTM inference graph plus a DeepFool adversarial branch.

    Creates input placeholders, the CharLSTM model, evaluation metrics
    (via ``build_metric``), and a DeepFool attack whose perturbed
    embeddings are mapped back to token space with ``reverse_embedding``.

    :param cfg: configuration object; this function reads ``batch_size``
        and ``charlen`` from it (the model reads the rest).
    :return: an attribute-bag ``env`` holding the graph endpoints
        (``x``, ``y``, ``training``, ``model``, ``ybar``, ``saver``,
        ``adv_epochs``, ``adv_eps``, ``xadv``, plus whatever
        ``build_metric`` attaches).
    """
    # Plain namespace object to collect the graph endpoints.
    class _Dummy:
        pass
    env = _Dummy()

    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    # Scalar train/eval switch; defaults to inference mode.
    env.training = tf.placeholder_with_default(False, (), 'mode')

    m = CharLSTM(cfg)
    # FIX: expose the model on env, consistent with the sibling
    # build_graph variants in this file (callers read env.model).
    env.model = m
    env.ybar = m.predict(env.x, env.training)
    env.saver = tf.train.Saver()
    env = build_metric(env, cfg)

    with tf.variable_scope('deepfool'):
        # Attack hyper-parameters are fed at run time.
        env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
        xadv = deepfool(m, env.x, epochs=env.adv_epochs, eps=env.adv_eps,
                        batch=True, clip_min=-10, clip_max=10)
        # DeepFool perturbs the embedding space; map back to tokens.
        env.xadv = m.reverse_embedding(xadv)
    return env
def build_graph(cfg):
    """Assemble the WordCNN graph together with a DeepFool attack node.

    :param cfg: configuration object; ``seqlen`` is read here
        (inputs carry one extra position, hence ``seqlen + 1``).
    :return: namespace ``env`` exposing the graph endpoints
        (``x``, ``y``, ``training``, ``ybar``, ``model``, ``saver``,
        ``adv_epochs``, ``adv_eps``, ``xadv``, plus metric nodes).
    """
    class _Dummy:
        pass

    env = _Dummy()
    env.x = tf.placeholder(tf.int32, [None, cfg.seqlen + 1], 'x')
    env.y = tf.placeholder(tf.int32, [None, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    net = WordCNN(cfg)
    env.ybar = net.predict(env.x, env.training)
    env.model = net
    # we do not save the embedding here since embedding is not trained.
    env.saver = tf.train.Saver(var_list=net.varlist)

    env = build_metric(env, cfg)

    with tf.variable_scope('deepfool'):
        # Run-time attack knobs.
        env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
        env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
        env.xadv = deepfool(net, env.x,
                            epochs=env.adv_epochs, eps=env.adv_eps,
                            batch=True, clip_min=-10, clip_max=10)
    return env
def build_graph(cfg):
    """Build the plain CharLSTM inference/evaluation graph (no attack).

    :param cfg: configuration object; ``batch_size`` and ``charlen``
        are read here.
    :return: namespace ``env`` with ``x``, ``y``, ``training``,
        ``model``, ``ybar``, ``saver`` and the nodes added by
        ``build_metric``.
    """
    class _Dummy:
        pass

    env = _Dummy()
    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    # Defaults to inference mode unless fed True.
    env.training = tf.placeholder_with_default(False, (), 'mode')

    net = CharLSTM(cfg)
    env.model = net
    env.ybar = net.predict(env.x, env.training)
    env.saver = tf.train.Saver()

    return build_metric(env, cfg)
def build_graph(cfg):
    """Build the plain WordCNN inference/evaluation graph (no attack).

    :param cfg: configuration object; ``seqlen`` is read here
        (inputs carry one extra position, hence ``seqlen + 1``).
    :return: namespace ``env`` with ``x``, ``y``, ``training``,
        ``ybar``, ``model``, ``saver`` and the metric nodes.
    """
    class _Dummy:
        pass

    env = _Dummy()
    env.x = tf.placeholder(tf.int32, [None, cfg.seqlen + 1], 'x')
    env.y = tf.placeholder(tf.int32, [None, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    net = WordCNN(cfg)
    env.ybar = net.predict(env.x, env.training)
    env.model = net
    # we do not save the embedding here since embedding is not trained.
    env.saver = tf.train.Saver(var_list=net.varlist)

    return build_metric(env, cfg)
def build_graph(cfg):
    """Build the CharLSTM graph with metrics and an Adam training op.

    :param cfg: configuration object; ``batch_size`` and ``charlen``
        are read here.
    :return: namespace ``env`` with ``x``, ``y``, ``training``,
        ``model``, ``ybar``, ``saver``, the metric nodes
        (including ``loss``), and ``train_op``.
    """
    class _Dummy:
        pass

    env = _Dummy()
    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    net = CharLSTM(cfg)
    env.model = net
    env.ybar = net.predict(env.x, env.training)
    env.saver = tf.train.Saver()

    # build_metric is expected to attach env.loss, consumed below.
    env = build_metric(env, cfg)

    with tf.variable_scope('train_op'):
        env.train_op = tf.train.AdamOptimizer().minimize(env.loss)

    return env
def build_graph(cfg):
    """Build the CharLSTM graph plus a HotFlip adversarial branch.

    :param cfg: configuration object; this function reads
        ``batch_size``, ``charlen``, ``embedding_dim``, ``beam_width``
        and ``maxchars``.
    :return: namespace ``env`` with ``x``, ``y``, ``training``,
        ``model``, ``ybar``, ``saver``, the metric nodes, and the
        adversarial output ``xadv``.
    """
    class _Dummy:
        pass

    env = _Dummy()
    env.x = tf.placeholder(tf.int32, [cfg.batch_size, cfg.charlen], 'x')
    env.y = tf.placeholder(tf.int32, [cfg.batch_size, 1], 'y')
    env.training = tf.placeholder_with_default(False, (), 'mode')

    net = CharLSTM(cfg)
    env.model = net
    env.ybar = net.predict(env.x, env.training)
    env.saver = tf.train.Saver()

    env = build_metric(env, cfg)

    with tf.variable_scope('hotflip'):
        # Beam-searched character replacement attack.
        env.xadv = hf_replace(net, env.x,
                              seqlen=cfg.charlen,
                              embedding_dim=cfg.embedding_dim,
                              beam_width=cfg.beam_width,
                              chars=cfg.maxchars)
    return env