Example 1
def evaluate():
  # Read vocabulary
  vocab, rev_vocab = _load_vocabulary(FLAGS.vocab_fname)

  with tf.Graph().as_default() as g:
    # Enqueue data for evaluation
    num_examples_per_epoch, tower_img_embedding, tower_context_length, \
        tower_caption_length, tower_context_id, tower_caption_id, \
        tower_answer_id, tower_context_mask, \
        tower_caption_mask = enqueue(True)

    tower_argmax = []
    # Collect the argmax predictions from each model tower.
    with tf.variable_scope(tf.get_variable_scope()) as scope:
      for i in range(FLAGS.num_gpus):
        with tf.device('/gpu:%d' % i):
          with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
            inputs = [
                tower_img_embedding[i],
                tower_context_length[i],
                tower_caption_length[i],
                tower_context_id[i],
                tower_caption_id[i],
                tower_answer_id[i],
                tower_context_mask[i],
                tower_caption_mask[i]
            ]
            net = CSMN(inputs, ModelConfig(FLAGS), is_training=False)
            argmax = net.argmax
            # Reuse variables for the next tower.
            tf.get_variable_scope().reuse_variables()

            # Keep track of the predictions across all towers.
            tower_argmax.append(argmax)
    argmaxs = tf.concat(tower_argmax, 0)
    answer_ids = tf.concat(tower_answer_id, 0)
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

    # Don't evaluate the same checkpoint twice: b_g_s holds the global step
    # of the last evaluated checkpoint and is passed to _eval_once.
    b_g_s = "0"
    while True:
      c_g_s = _eval_once(
          saver, summary_writer, argmaxs, answer_ids, vocab,
          rev_vocab, num_examples_per_epoch, b_g_s
      )
      b_g_s = c_g_s
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
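
For context, the scaffolding around evaluate() in a TF 1.x script of this kind usually looks like the sketch below. The flag names match those used above; the defaults, the num_gpus flag definition, and the directory handling are assumptions, not the original code.

# A minimal sketch of the surrounding script setup (TF 1.x); flag defaults
# and the eval_dir handling are assumptions, not the original code.
import time

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/csmn_eval',
                           'Directory for event logs.')
tf.app.flags.DEFINE_string('vocab_fname', 'vocab.txt', 'Vocabulary file.')
tf.app.flags.DEFINE_integer('num_gpus', 1, 'Number of GPUs to use.')
tf.app.flags.DEFINE_boolean('run_once', False, 'Evaluate once, then exit.')
tf.app.flags.DEFINE_integer('eval_interval_secs', 600,
                            'Seconds between evaluation runs.')

def main(_):
  # Start from a clean event-log directory, then run the evaluation loop.
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  evaluate()

if __name__ == '__main__':
  tf.app.run()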
Example 2
def tower_loss(inputs, scope):
    '''
    Runs the input data through the model and returns the loss.
    Inputs:
        inputs  - dictionary of input tensors, as yielded by a
                  tf.data.Dataset iterator.
        scope   - name-scope string of the current tower, used to prefix
                  the loss summary tag.
    Outputs:
        loss    - model loss
    '''
    net = CSMN(inputs, ModelConfig(FLAGS))
    loss = net.loss
    tf.summary.scalar(scope + 'loss', loss)
    return loss
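
Since tower_loss only builds a single tower's graph and returns its loss, a training script is expected to call it once per GPU and combine the results. Below is a sketch of that standard TF 1.x multi-tower pattern; the optimizer choice, FLAGS.learning_rate, and the inputs variable are assumptions.

# Sketch of the usual TF 1.x multi-tower training loop around tower_loss.
# The optimizer and FLAGS.learning_rate are assumptions; `inputs` stands
# for the per-tower batch (e.g. the result of an iterator's get_next()).
opt = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
    for i in range(FLAGS.num_gpus):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                loss = tower_loss(inputs, scope)
                # Share model variables with the next tower.
                tf.get_variable_scope().reuse_variables()
                tower_grads.append(opt.compute_gradients(loss))

# Average each variable's gradient across towers and apply the update once.
average_grads = []
for grads_and_vars in zip(*tower_grads):
    grads = [g for g, _ in grads_and_vars if g is not None]
    average_grads.append(
        (tf.reduce_mean(tf.stack(grads), 0), grads_and_vars[0][1]))
train_op = opt.apply_gradients(average_grads)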
Example 3
def evaluate():
    vocab, rev_vocab = load_vocabulary(FLAGS.vocab_fname)

    with tf.Graph().as_default() as g:
        # Number of test examples and a tf.data iterator over the test split.
        n, data_iter = dr.get_data('test2.txt', False)

        tower_argmax = []
        tower_ans_id = []

        with tf.variable_scope(tf.get_variable_scope()) as scope:
            for i in range(FLAGS.NUM_GPUS):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as var_scope:
                        inp = data_iter.get_next()
                        net = CSMN(inp, ModelConfig(FLAGS), is_training=False)
                        argmax = net.argmax
                        tf.get_variable_scope().reuse_variables()

                        tower_argmax.append(argmax)
                        tower_ans_id.append(inp['answer_id'])

        argmaxs = tf.concat(tower_argmax, 0)
        answer_ids = tf.concat(tower_ans_id, 0)
        saver = tf.train.Saver(tf.global_variables())

        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        # Global step of the last evaluated checkpoint; avoids re-evaluation.
        b_g_s = "0"

        while True:
            c_g_s = eval_once(saver, summary_writer, argmaxs, answer_ids,
                              vocab, rev_vocab, n, b_g_s)
            b_g_s = c_g_s
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
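
eval_once is not shown in the example, but its role can be inferred from the call site: it returns the global step of the checkpoint it evaluated so the loop can skip checkpoints it has already seen. A hedged sketch follows; FLAGS.checkpoint_dir, FLAGS.batch_size, and the accuracy metric are assumptions, not the original implementation.

# A hedged sketch of eval_once, inferred from its call site above.
# FLAGS.checkpoint_dir, FLAGS.batch_size and the accuracy metric are
# assumptions, not the original implementation.
import math

def eval_once(saver, summary_writer, argmaxs, answer_ids, vocab, rev_vocab,
              num_examples, prev_global_step):
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not ckpt or not ckpt.model_checkpoint_path:
            return prev_global_step
        # By convention the global step is the checkpoint filename suffix.
        global_step = ckpt.model_checkpoint_path.split('-')[-1]
        if global_step == prev_global_step:
            return global_step  # This checkpoint was already evaluated.
        saver.restore(sess, ckpt.model_checkpoint_path)

        # vocab/rev_vocab would be used to decode predictions for logging.
        correct, total = 0, 0
        num_iter = int(math.ceil(num_examples / float(FLAGS.batch_size)))
        for _ in range(num_iter):
            preds, answers = sess.run([argmaxs, answer_ids])
            correct += (preds == answers).sum()
            total += answers.size

        summary = tf.Summary()
        summary.value.add(tag='accuracy', simple_value=correct / float(total))
        summary_writer.add_summary(summary, global_step)
        return global_step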
Example 4
def _tower_loss(inputs, scope):
    # As in tower_loss above: build the model and attach a per-tower loss summary.
    net = CSMN(inputs, ModelConfig(FLAGS))
    loss = net.loss
    tf.summary.scalar(scope + 'loss', loss)
    return loss
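
In both tower_loss variants, scope is the string yielded by tf.name_scope in the calling loop (e.g. 'tower_0/'), so the summary tag becomes something like 'tower_0/loss', keeping each tower's loss as a separate curve in TensorBoard.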