Example 1
def evaluate():
    """Evaluate model on Dataset for a number of steps."""
    print("in model evaluation")
    dataset = dataset_module.MyDataset(subset=FLAGS.subset)
    assert dataset.data_files()
    FLAGS.num_examples = (dataset.num_examples_per_epoch() /
                          FLAGS.subsample_factor)
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        tensors_in, tensors_out = batching.inputs(dataset)

        # Number of classes in the Dataset label set plus 1.
        # Label 0 is reserved for an (unused) background class.
        num_classes = dataset.num_classes() + 1

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits_all = model.inference(tensors_in,
                                     num_classes,
                                     for_training=False)
        # model.loss() registers its losses with tf.contrib.slim; fetch them
        # from the slim collection as the tensors to evaluate.
        model.loss(logits_all, tensors_out, batch_size=FLAGS.batch_size)
        loss_op = slim.losses.get_losses()

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        # (tf.merge_all_summaries and tf.train.SummaryWriter are the pre-1.0
        # names of tf.summary.merge_all and tf.summary.FileWriter.)
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                                graph_def=graph_def)

        while True:
            _eval_once(saver, summary_writer, logits_all, tensors_out, loss_op,
                       summary_op, tensors_in)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
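
The helper _eval_once is not included in this listing. For orientation, here is a minimal sketch of what such a helper typically does in this pre-1.0 TensorFlow pattern: restore the latest checkpoint into the moving-average variables, start the input queue runners, average the loss over the evaluation set, and write a summary. The argument list is copied from the call above; the body, the FLAGS.checkpoint_dir flag, and the summary tag are assumptions, not the repository's actual implementation.

import math

import numpy as np
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS


def _eval_once(saver, summary_writer, logits_all, tensors_out, loss_op,
               summary_op, tensors_in):
    """Hypothetical single evaluation pass: restore a checkpoint, average the loss."""
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print("No checkpoint found in %s" % FLAGS.checkpoint_dir)
            return
        # Restore the moving-average shadow variables built in evaluate().
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = ckpt.model_checkpoint_path.split('-')[-1]

        coord = tf.train.Coordinator()
        threads = []
        try:
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord,
                                                 daemon=True, start=True))

            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
            losses = []
            step = 0
            while step < num_iter and not coord.should_stop():
                # loss_op is the list returned by slim.losses.get_losses().
                losses.append(np.sum(sess.run(loss_op)))
                step += 1

            mean_loss = float(np.mean(losses))
            print("step %s, mean loss = %.4f" % (global_step, mean_loss))

            # Emit the merged summaries plus the freshly computed loss.
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='eval/mean_loss', simple_value=mean_loss)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)

In this sketch logits_all, tensors_out, and tensors_in are passed through unused; a real implementation would also run predictions or per-example statistics with them.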
Example 2
def evaluate():
    """Evaluate model on Dataset for a number of steps."""
    dataset = dataset_module.MyDataset(subset=FLAGS.subset)
    assert dataset.data_files()
    FLAGS.num_examples = (dataset.num_examples_per_epoch() /
                          FLAGS.subsample_factor)

    output_dir = os.path.dirname(FLAGS.stat_output_path)
    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        tensors_in, tensors_out = batching.inputs(dataset)

        # Limit each op to a single thread and let the GPU allocator grow on
        # demand so evaluation does not grab all device memory up front.
        config = tf.ConfigProto(intra_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            # Start the queue runners.
            coord = tf.train.Coordinator()
            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(
                        qr.create_threads(sess,
                                          coord=coord,
                                          daemon=True,
                                          start=True))

                # Look up the evaluation routine by name; FLAGS.eval_method
                # must match a module-level function taking these arguments.
                eval_method = globals()[FLAGS.eval_method]
                eval_method(tensors_out, sess, coord, tensors_in)

            except Exception as e:  # pylint: disable=broad-except
                coord.request_stop(e)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)
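
The call globals()[FLAGS.eval_method] dispatches to whatever module-level function the flag names, so new evaluation routines can be selected from the command line without touching evaluate(). No such routine appears in the listing; below is a minimal sketch, under the assumption that the routine averages the output tensors over the evaluation set and writes the result to FLAGS.stat_output_path (the path whose directory evaluate() creates). The function name dump_mean_stats and the assumption that tensors_out is a list of tensors are illustrative only.

import math

import numpy as np
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS


def dump_mean_stats(tensors_out, sess, coord, tensors_in):
    """Hypothetical eval_method: average each output tensor over the eval set."""
    num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
    sums, count = None, 0

    step = 0
    while step < num_iter and not coord.should_stop():
        # Assumes tensors_out is a list/tuple of tensors with fixed shapes.
        batch = [np.asarray(t, dtype=np.float64) for t in sess.run(list(tensors_out))]
        if sums is None:
            sums = [np.zeros_like(t) for t in batch]
        for acc, t in zip(sums, batch):
            acc += t
        count += 1
        step += 1

    # Write per-tensor means to the stats file whose directory evaluate() created.
    with tf.gfile.Open(FLAGS.stat_output_path, 'w') as f:
        for i, acc in enumerate(sums or []):
            f.write("output_%d mean:\n%s\n" % (i, np.array_str(acc / max(count, 1))))

A routine like this would be selected by passing --eval_method=dump_mean_stats on the command line.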