Example #1
import os

import tensorflow as tf

# model_fn, FLAGS, the evaluation module (checkpoint-driven evaluation helpers),
# and the private _get_* / _extract_* utilities used below are assumed to be
# defined or imported elsewhere in the original module.
def main(argv=None):

    dataset = _get_input()

    # Extract input tensors for evaluation
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    features, labels = iterator.get_next()

    # Construct the evaluation function
    evaluate_fn = model_fn.evaluate_fn()

    # Wrap the ops in an Estimator spec object
    estimator_spec = evaluate_fn(features, labels, tf.estimator.ModeKeys.EVAL,
                                 {'continuous_eval': True})

    # Extract the necessary ops and the final tensors from the estimator spec
    update_op, value_ops = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)

    # Evaluate only N batches (here N == 1)
    stop_hook = evaluation._StopAfterNEvalsHook(1)

    # Create summaries of values added to tf.GraphKeys.SUMMARIES
    summary_writer = tf.compat.v1.summary.FileWriter(
        os.path.join(FLAGS.model, FLAGS.output))
    summary_hook = evaluation.SummaryAtEndHook(summary_writer=summary_writer)

    # Re-run evaluation each time a new checkpoint is found
    evaluation.evaluate_repeatedly(checkpoint_dir=FLAGS.model,
                                   eval_ops=update_op,
                                   final_ops=value_ops,
                                   hooks=[stop_hook, summary_hook],
                                   config=_get_config(),
                                   eval_interval_secs=FLAGS.eval_interval_secs)
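
Example #1 relies on a private helper, _extract_metric_update_ops, that is not shown in the snippet. A common implementation splits the eval_metric_ops dictionary from the EstimatorSpec into one grouped update op (passed as eval_ops) and a dictionary of value tensors (passed as final_ops). The sketch below follows that pattern under that assumption; the original helper may differ:

import tensorflow as tf

def _extract_metric_update_ops(eval_metric_ops):
    """Splits eval_metric_ops into a grouped update op and value tensors.

    Sketch of the assumed helper; the original implementation may differ.
    """
    update_ops = []
    value_ops = {}
    for metric_name, (value_op, update_op) in sorted(eval_metric_ops.items()):
        value_ops[metric_name] = value_op
        update_ops.append(update_op)
    # Group the per-metric update ops so they can be run together as eval_ops.
    return tf.group(*update_ops), value_ops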
Example #2
import tensorflow as tf

# As in Example #1, model_fn, FLAGS, _get_config, and _get_input are assumed
# to be defined or imported elsewhere in the original module.
def main(argv=None):

    # Initialize the classifier
    classifier = tf.estimator.Estimator(config=_get_config(),
                                        model_fn=model_fn.evaluate_fn(),
                                        model_dir=FLAGS.model,
                                        params={'continuous_eval': False})

    # Evaluate the latest checkpoint over the input and collect the metric values
    evaluations = classifier.evaluate(input_fn=_get_input)

    print(evaluations)
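
Example #1 drives evaluation manually with checkpoint-polling hooks, while Example #2 delegates the same work to Estimator.evaluate. Both examples read their data through _get_input, which is defined outside the snippet: in Example #1 it must return a tf.data.Dataset (it is passed to make_one_shot_iterator), and in Example #2 the same function serves as the Estimator's input_fn, which may likewise return a dataset of (features, labels) batches. The sketch below is a minimal input pipeline consistent with both uses; the feature spec, file pattern, and batch size are placeholders rather than values from the original code:

import tensorflow as tf

def _get_input():
    """Sketch of the assumed input pipeline; names and shapes are placeholders."""
    feature_spec = {
        'feature': tf.io.FixedLenFeature([10], tf.float32),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def _parse(serialized):
        parsed = tf.io.parse_single_example(serialized, feature_spec)
        return {'feature': parsed['feature']}, parsed['label']

    # Placeholder file pattern; the original code reads its own evaluation data.
    files = tf.io.gfile.glob('eval-*.tfrecord')
    dataset = tf.data.TFRecordDataset(files)
    return dataset.map(_parse).batch(32)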