Example 1
def _eval_once(saver, summary_writer, argmaxs, answer_ids, vocab, rev_vocab,
    num_examples_per_epoch, b_global_step):
  """Run Eval once.
  """
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False,
                                        gpu_options=gpu_options)) as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restore variables from the checkpoint.
      print('model path:', ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)

      # Extract the global step from the checkpoint path,
      # e.g. '/path/model.ckpt-1234' -> '1234'.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      # Skip evaluation if this checkpoint has already been evaluated.
      if global_step == b_global_step:
        return global_step
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))
      num_iter = 1 + int(
          num_examples_per_epoch / FLAGS.batch_size / FLAGS.num_gpus
      )

      desc_list = []
      answer_list = []
      desc_token_list = []
      answer_token_list = []
      step = 0
      while step < num_iter and not coord.should_stop():
        # Fetch a batch of generated descriptions and ground-truth answers.
        results = sess.run([argmaxs, answer_ids])
        desc_list += results[0].tolist()
        answer_list += results[1].tolist()
        step += 1

      # Convert token ids back to words, truncating at the first EOS token.
      for i in range(len(desc_list)):
        desc = []
        answer = []
        for k in range(len(desc_list[i])):
          token_id = desc_list[i][k]
          if token_id == EOS_ID:
            break
          desc.append(rev_vocab[token_id])
        for k in range(len(answer_list[i])):
          token_id = answer_list[i][k]
          if token_id == EOS_ID:
            break
          answer.append(rev_vocab[token_id])
        desc_token_list.append(desc)
        answer_token_list.append(answer)

      colorlog.info(
          colored("Validation Output Example (%s)" % global_step, 'green')
      )
      for i, (desc, answer) in enumerate(
          zip(desc_token_list[:15], answer_token_list[:15])
      ):
        print("%d." % (i))
        print(' '.join(answer))
        print(' '.join(desc) + "\n")

      evaluator = Evaluator()
      result = evaluator.evaluation(
          desc_token_list, answer_token_list, "coco"
      )

      summary = _inject_summary(result)
      summary_writer.add_summary(summary, global_step)

    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
  return global_step
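The helper _inject_summary is not shown in this listing. Below is a minimal sketch of what it might look like, assuming the evaluator returns a flat dict mapping metric names (e.g. 'Bleu_4', 'CIDEr') to floats and that the metrics are written with the TF 1.x tf.Summary protobuf API; the 'eval/' tag prefix is an arbitrary choice, not taken from the original code.

import tensorflow as tf

def _inject_summary(result):
  """Hypothetical helper: wrap a {metric_name: float} dict in a tf.Summary."""
  summary = tf.Summary()
  for name, value in result.items():
    # simple_value produces a scalar that TensorBoard plots against the step.
    summary.value.add(tag='eval/%s' % name, simple_value=float(value))
  return summary

With such a helper, the summary_writer.add_summary(summary, global_step) call in the snippet above surfaces the evaluation metrics as scalar curves in TensorBoard.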
Example 2
def eval_once(saver, summary_writer, argmaxs, ans_ids, vocab, rev_vocab,
              num_data, b_global_step):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.80)
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=False,
                                 gpu_options=gpu_options)

    with tf.Session(config=sess_config) as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Extract the global step from the checkpoint filename.
            global_step = (
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            if global_step == b_global_step:
                return global_step
        else:
            print('No checkpoint file found.')
            return

        step = 0
        # Note: floor division drops any final partial batch.
        max_iter = num_data // FLAGS.BATCH_SIZE

        res = []
        ans = []
        res_tokens_list = []
        ans_tokens_list = []
        while step < max_iter:
            results = sess.run([argmaxs, ans_ids])
            res += results[0].tolist()
            ans += results[1].tolist()
            step += 1

        for i in range(len(res)):
            caption = []
            answer = []
            for k in range(len(res[i])):
                token_id = res[i][k]
                if token_id == EOS_ID:
                    break
                caption.append(rev_vocab[token_id])
            for k in range(len(ans[i])):
                token_id = ans[i][k]
                if token_id == EOS_ID:
                    break
                answer.append(rev_vocab[token_id])
            res_tokens_list.append(caption)
            ans_tokens_list.append(answer)

        colorlog.info(
            colored("Validation output example: (%s)" % global_step, 'green'))
        for i, (res_tokens, ans_tokens) in enumerate(
                zip(res_tokens_list[:5], ans_tokens_list[:5])):
            print('%d' % i)
            print(' '.join(ans_tokens))
            print(' '.join(res_tokens))

        evaluator = Evaluator()
        result = evaluator.evaluation(res_tokens_list, ans_tokens_list, 'coco')

        summary = inject_summary(result)
        summary_writer.add_summary(summary, global_step)

        return global_step
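Both versions take a b_global_step argument so the caller can skip a checkpoint that has already been scored: eval_once returns the global step it just evaluated, and the caller passes that value back in on the next call. A minimal sketch of such a driver loop follows, assuming hypothetical FLAGS.run_once and FLAGS.eval_interval_secs flags (the function name evaluate and both flags are illustrative, not taken from the original code).

import time

def evaluate(saver, summary_writer, argmaxs, ans_ids, vocab, rev_vocab,
             num_data):
    # Re-evaluate whenever a new checkpoint appears; the returned global
    # step is fed back so the same checkpoint is not scored twice.
    # FLAGS.run_once and FLAGS.eval_interval_secs are assumed to be
    # defined elsewhere.
    last_global_step = None
    while True:
        last_global_step = eval_once(saver, summary_writer, argmaxs, ans_ids,
                                     vocab, rev_vocab, num_data,
                                     last_global_step)
        if FLAGS.run_once:
            break
        time.sleep(FLAGS.eval_interval_secs)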