Example #1
def gaws(rv, window_size, word_embedding, hidden_size, restore_name, grad, k,
         d):
    rnn_sentiment = 'pos' if not d[0] else 'neg'  # d[0] == 0 denotes a positive prediction
    args = np.argsort(grad)  # indices sorted by gradient value, ascending
    args = np.flip(args, axis=0)  # largest gradient first
    k = min(k, args.size)  # clamp k to the number of gradient entries
    window_size = min(window_size, rv.length)
    g = tf.Graph()
    with g.as_default():
        global_step_tensor = tf.Variable(0,
                                         trainable=False,
                                         name='global_step')
        r = rnn.classifier(batch_size=10000,
                           learning_rate=0.0,
                           hidden_size=hidden_size,
                           max_time=window_size,
                           embeddings=word_embedding,
                           global_step=global_step_tensor)

        with tf.Session() as sess:
            tf.train.Saver().restore(sess, restore_name)
            print('Running window attack...')
            ii = [0] * k
            jj = [0] * k
            if rv.sentiment == 'pos':
                pp = [float('inf')] * k
            else:
                pp = [-float('inf')] * k

            for i in range(min(rv.length, k)):
                d, p, _ = r.infer_window(sess, rv, args[i], window_size)
                #rnn_sent = 'pos' if not d[rv.index_vector[0,0]] else 'neg'
                #if rnn_sent != rv.sentiment:
                #   print('RNN sentiment: ',rnn_sent,'Review sentiment: ',rv.sentiment)
                #   print('Neural Net was wrong')
                #   return None,None,None
                p = p[:, 0]
                pmin = np.amin(p, axis=0)
                pmax = np.amax(p, axis=0)
                if rv.sentiment == 'pos':
                    ii[i] = np.argmin(p)
                    pp[i] = pmin
                else:
                    ii[i] = np.argmax(p)
                    pp[i] = pmax
                jj[i] = i
    ii = np.array(ii)
    jj = np.array(jj)
    pp = np.array(pp)
    return (ii, jj, pp)
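A minimal call sketch for gaws, assuming rv, word_embedding, grad (per-word gradients from an earlier backward pass), and d (the decision vector from a prior full-review inference) already exist; the window size and k here are illustrative:

ii, jj, pp = gaws(rv, 10, word_embedding, 16,
                  './ckpts/gridckpt_16_10/imdb-rnn-e15.ckpt',
                  grad, k=5, d=d)
# pp[i] holds the extreme positive-class probability found at the
# i-th highest-gradient position, ii[i] the argmin/argmax within
# that inferred batch, and jj[i] the rank i itself.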
Example #2
def win_atk(rv, window_size, word_embedding, hidden_size, restore_name):
    window_size = min(window_size, rv.length)
    g = tf.Graph()
    with g.as_default():
        global_step_tensor = tf.Variable(0,
                                         trainable=False,
                                         name='global_step')
        r = rnn.classifier(batch_size=10000,
                           learning_rate=0.0,
                           hidden_size=hidden_size,
                           max_time=window_size,
                           embeddings=word_embedding,
                           global_step=global_step_tensor)

        with tf.Session() as sess:
            tf.train.Saver().restore(sess, restore_name)
            print('Running window attack...')
            ii = [0] * rv.length
            jj = [0] * rv.length
            if rv.sentiment == 'pos':
                pp = [float('inf')] * rv.length
            else:
                pp = [-float('inf')] * rv.length

            for i in range(rv.length):
                d, p, _ = r.infer_window(sess, rv, i, window_size)  # discard the gradient; 'g' would shadow the graph
                rnn_sent = 'pos' if not d[rv.index_vector[0, 0]] else 'neg'
                if rnn_sent != rv.sentiment:
                    print('RNN sentiment: ', rnn_sent, 'Review sentiment: ',
                          rv.sentiment)
                    print('Neural Net was wrong')
                    return None, None, None
                p = p[:, 0]
                pmin = np.amin(p, axis=0)
                pmax = np.amax(p, axis=0)
                if rv.sentiment == 'pos':
                    ii[i] = np.argmin(p)
                    pp[i] = pmin
                else:
                    ii[i] = np.argmax(p)
                    pp[i] = pmax
                jj[i] = i
    ii = np.array(ii)
    jj = np.array(jj)
    pp = np.array(pp)
    return (ii, jj, pp)
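win_atk scores every window position; given the fill convention above (start at +inf for 'pos' reviews, -inf for 'neg'), the most damaging window falls out of the returned arrays directly. A sketch using only the return values (the window size is illustrative):

ii, jj, pp = win_atk(rv, 10, word_embedding, 16,
                     './ckpts/gridckpt_16_10/imdb-rnn-e15.ckpt')
if pp is not None:
    best = np.argmin(pp) if rv.sentiment == 'pos' else np.argmax(pp)
    print('most damaging window at position', jj[best],
          'with probability', pp[best])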
Example #3
import sys

import numpy as np
import tensorflow as tf

import rnn

# Load the word embedding
emb_dict = np.load('emb_dict.npy', allow_pickle=True).item()  # allow_pickle is required on NumPy >= 1.16.3
embeddings = np.load('final_embeddings.npy')

for hidden_size, lr in zip([2, 4, 8, 16, 32, 64, 128] * 2, [0.01, 0.001] * 7):
  # Reset the TensorFlow graph
  g = tf.Graph()
  tf.reset_default_graph()
  with g.as_default():
    # Set global step to zero, for keeping track of training progress
    global_step_tensor = tf.Variable(0, trainable = False, name = 'global_step')
    # Make the RNN
    r = rnn.classifier(
      batch_size = batch_size,
      learning_rate = lr,
      hidden_size = hidden_size,
      max_time = max_time,
      embeddings = embeddings,
      global_step = global_step_tensor
    )

    # Training session
    with tf.Session() as sess:
      saver = tf.train.Saver(max_to_keep=200)
      run_tag = str(hidden_size) + '_' + str(int(lr * 1000))
      train_writer = tf.summary.FileWriter(sys.argv[1] + '/train_' + run_tag)
      test_writer = tf.summary.FileWriter(sys.argv[1] + '/test_' + run_tag)
      sess.run(tf.global_variables_initializer())
      for epoch in range(50):
        saver.save(sess, './' + sys.argv[2] + '_' + run_tag + '/imdb-rnn-e%d.ckpt' % epoch)
        print('epoch: ',epoch)
        # Testing #
        sess.run(tf.local_variables_initializer())
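The grid driving this loop comes from zipping the two repeated lists: fourteen (hidden_size, lr) runs, so every hidden size is eventually trained with both learning rates (the alternation flips phase on the second pass through the sizes):

>>> list(zip([2, 4, 8, 16, 32, 64, 128] * 2, [0.01, 0.001] * 7))
[(2, 0.01), (4, 0.001), (8, 0.01), (16, 0.001), (32, 0.01), (64, 0.001),
 (128, 0.01), (2, 0.001), (4, 0.01), (8, 0.001), (16, 0.01), (32, 0.001),
 (64, 0.01), (128, 0.001)]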
Example #4
    # Skip reviews where the one-word replacement already flipped the prediction.
    if prob_positive < 0.5 and rv.sentiment == 'pos':
        continue
    elif prob_positive > 0.5 and rv.sentiment == 'neg':
        continue
    g = tf.Graph()
    tf.reset_default_graph()
    rv.translate(1024, word_to_embedding_index, embedding_index_to_word)
    with g.as_default():
        global_step_tensor = tf.Variable(0,
                                         trainable=False,
                                         name='global_step')
        # Create RNN graph
        r = rnn.classifier(batch_size=rv.length,
                           learning_rate=0.0,
                           hidden_size=16,
                           max_time=1024,
                           embeddings=word_embedding,
                           global_step=global_step_tensor)
        with tf.Session() as sess:
            tf.train.Saver().restore(
                sess, './ckpts/gridckpt_16_10/imdb-rnn-e15.ckpt')
            rv.translate(r.max_time, word_to_embedding_index,
                         embedding_index_to_word)
            rv.vec(word_embedding)
            decision, probability = \
                r.infer_rep_dpg(sess, rv, rv.index_vector[0])

            #grad = batch_grad[0][0,0:rv.length,:]
            #W = word_embedding; G = grad
            #D = W @ (G.T)
            #c = np.sum(np.multiply(rv.vector_list,G),axis=1)
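One plausible reading of the commented-out lines above: with W the full vocabulary embedding matrix and G the per-position gradients, D[j, i] is the dot product of vocabulary word j with position i's gradient, and c[i] is the same dot product for the word currently at position i, so D[j, i] - c[i] approximates, to first order, the change in the model's output from swapping in word j at position i. A hedged sketch under assumed shapes (W is (V, e); G and rv.vector_list are (L, e)); which extreme to take depends on the sign convention of the gradient:

W = word_embedding                       # (V, e) vocabulary embeddings
G = batch_grad[0][0, 0:rv.length, :]     # (L, e) gradient at each position
D = W @ G.T                              # (V, L) candidate-word dot products
c = np.sum(rv.vector_list * G, axis=1)   # (L,) current-word dot products
scores = D - c                           # scores[j, i]: first-order effect of word j at slot i
word_idx, slot = np.unravel_index(np.argmax(scores), scores.shape)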
Example #5
rv = rp.review('./aclImdb/test/posneg/' + test_file)
per = rv.length // divs
rv.translate(rv.length, word_to_embedding_index,
             embedding_index_to_word)
rv.vec(word_embedding)
min_arr = np.array([float('inf')] * divs)
max_arr = np.array([float('-inf')] * divs)
g = tf.Graph()
with g.as_default():
    global_step_tensor = tf.Variable(0,
                                     trainable=False,
                                     name='global_step')
    # Create RNN graph
    r = rnn.classifier(batch_size=top_k * divs,
                       learning_rate=0.0,
                       hidden_size=16,
                       max_time=per,
                       embeddings=word_embedding,
                       global_step=global_step_tensor)
    with tf.Session() as sess:
        tf.train.Saver().restore(
            sess, './ckpts/gridckpt_16_10/imdb-rnn-e15.ckpt')
        print('Processing ' + test_file)
        ii = [0] * divs
        jj = [0] * divs
        for ins_location in range(per):
            print(ins_location)
            _, p, _, im, rep_vec = r.infer_swap(
                sess, rv, ins_location, divs, top_k)
            p = p[:, 0]
            p = np.reshape(p, (r.batch_size // divs, divs), 'F')
            min_arr = np.minimum(np.amin(p, axis=0), min_arr)
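The 'F' (column-major) reshape is what groups the stacked batch by division: assuming infer_swap lays out its batch contiguously per division, column j of the reshaped array holds the top_k candidate probabilities for division j. A toy check of that layout:

import numpy as np

p = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])  # top_k=3 candidates x divs=2
print(np.reshape(p, (3, 2), 'F'))
# [[0.1 0.4]
#  [0.2 0.5]
#  [0.3 0.6]]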