Example #1
def evaluate_model(config):
    """ Train the model using the passed in config """
    ###########################################################
    # Generate the model
    ###########################################################
    outputs = create_generator(config, input_utils.get_data_shape(config.dataset))

    ###########################################################
    # Setup the evaluation metrics and summaries
    ###########################################################
    # Generate the canvases that lead to the final output image
    summaries = []
    summaries.extend(layers.summarize_collection(graph_utils.GraphKeys.RNN_OUTPUTS))
    with tf.name_scope('canvases'):
        for step, canvas in enumerate(outputs):
            canvas = input_utils.reshape_images(canvas, config.dataset)
            tiled_images = image_utils.tile_images(canvas)
            summaries.append(tf.summary.image('step{0}'.format(step), tiled_images))

    summary_op = tf.summary.merge(summaries, name='summaries')

    ###########################################################
    # Begin evaluation
    ###########################################################
    checkpoint_path = FLAGS.checkpoint_path
    if tf.gfile.IsDirectory(checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    eval_ops = tf.group(*outputs)
    hooks = [
        training.SummaryAtEndHook(log_dir=FLAGS.log_dir, summary_op=summary_op),
        training.StopAfterNEvalsHook(FLAGS.count)]

    training.evaluate_once(checkpoint_path, hooks=hooks, eval_ops=eval_ops)
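
The snippet above reads FLAGS.checkpoint_path, FLAGS.log_dir, and FLAGS.count, which are defined elsewhere in the module. A minimal sketch of those definitions, assuming tf.app.flags and using purely illustrative defaults, could look like:

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('checkpoint_path', '/tmp/model/train',
                    'Checkpoint file, or directory of checkpoints, to evaluate.')
flags.DEFINE_string('log_dir', '/tmp/model/eval',
                    'Directory where the end-of-evaluation summary is written.')
flags.DEFINE_integer('count', 100,
                     'Number of evaluation steps to run before stopping.')
FLAGS = flags.FLAGS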
Example #2
def evaluate(train_dir,
             eval_dir,
             config,
             dataset_fn,
             num_batches,
             master=''):
  """Evaluate the model repeatedly."""
  tf.gfile.MakeDirs(eval_dir)

  _trial_summary(
      config.hparams, config.eval_examples_path or config.tfds_name, eval_dir)
  with tf.Graph().as_default():
    model = config.model
    model.build(config.hparams,
                config.data_converter.output_depth,
                is_training=False)

    eval_op = model.eval(
        **_get_input_tensors(dataset_fn().take(num_batches), config))

    hooks = [
        contrib_training.StopAfterNEvalsHook(num_batches),
        contrib_training.SummaryAtEndHook(eval_dir)
    ]
    contrib_training.evaluate_repeatedly(
        train_dir,
        eval_ops=eval_op,
        hooks=hooks,
        eval_interval_secs=60,
        master=master)
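
Stripped of the model-specific config object, the wiring in `evaluate` reduces to the following sketch. The paths and the stand-in metric are assumptions for illustration; only the evaluate_repeatedly/hook pattern comes from the example:

import tensorflow as tf
from tensorflow.contrib import training as contrib_training

with tf.Graph().as_default():
  # Stand-in for a real eval graph: a global step (so checkpoints can be
  # restored) and one streaming metric whose update op is the eval op.
  tf.train.get_or_create_global_step()
  mean_value, update_mean = tf.metrics.mean(tf.random_normal([32]))
  tf.summary.scalar('mean_value', mean_value)

  contrib_training.evaluate_repeatedly(
      '/tmp/train',          # directory polled for new checkpoints
      eval_ops=update_mean,  # run once per eval step (10 per checkpoint here)
      hooks=[
          contrib_training.StopAfterNEvalsHook(10),
          contrib_training.SummaryAtEndHook('/tmp/eval'),
      ],
      eval_interval_secs=60)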
Example #3
def run_eval(build_graph_fn,
             train_dir,
             eval_dir,
             num_batches,
             timeout_secs=300):
    """Runs the training loop.

    Args:
      build_graph_fn: A function that builds the graph ops.
      train_dir: The path to the directory where checkpoints will be loaded
          from for evaluation.
      eval_dir: The path to the directory where the evaluation summary events
          will be written to.
      num_batches: The number of full batches to use for each evaluation step.
      timeout_secs: The number of seconds after which to stop waiting for a new
          checkpoint.
    Raises:
      ValueError: If `num_batches` is less than or equal to 0.
    """
    if num_batches <= 0:
        raise ValueError(
            '`num_batches` must be greater than 0. Check that the batch size is '
            'no larger than the number of records in the eval set.')
    with tf.Graph().as_default():
        build_graph_fn()

        global_step = tf.train.get_or_create_global_step()
        loss = tf.get_collection('loss')[0]
        perplexity = tf.get_collection('metrics/perplexity')[0]
        accuracy = tf.get_collection('metrics/accuracy')[0]
        eval_ops = tf.get_collection('eval_ops')

        logging_dict = {
            'Global Step': global_step,
            'Loss': loss,
            'Perplexity': perplexity,
            'Accuracy': accuracy
        }
        hooks = [
            EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),
            contrib_training.StopAfterNEvalsHook(num_batches),
            contrib_training.SummaryAtEndHook(eval_dir),
        ]

        contrib_training.evaluate_repeatedly(train_dir,
                                             eval_ops=eval_ops,
                                             hooks=hooks,
                                             eval_interval_secs=60,
                                             timeout=timeout_secs)
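
`run_eval` expects `build_graph_fn` to have populated the 'loss', 'metrics/perplexity', 'metrics/accuracy', and 'eval_ops' collections. A minimal sketch of such a function, assuming random stand-in inputs and a toy softmax loss (only the collection names come from the example), might be:

import tensorflow as tf


def build_eval_graph():
    """Toy graph exposing the collections that run_eval reads."""
    # Stand-in inputs; a real build_graph_fn would wire up the data pipeline.
    labels = tf.random_uniform([32], maxval=10, dtype=tf.int64)
    logits = tf.random_normal([32, 10])

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits))

    # Streaming metrics return (value, update_op) pairs; the update ops are
    # what StopAfterNEvalsHook counts as evaluation steps.
    accuracy, accuracy_update = tf.metrics.accuracy(
        labels, tf.argmax(logits, 1))
    perplexity, perplexity_update = tf.metrics.mean(tf.exp(loss))

    tf.add_to_collection('loss', loss)
    tf.add_to_collection('metrics/accuracy', accuracy)
    tf.add_to_collection('metrics/perplexity', perplexity)
    tf.add_to_collection('eval_ops', accuracy_update)
    tf.add_to_collection('eval_ops', perplexity_update)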
Example #4
def Evaluacion(build_graph_fn,
               train_dir,
               eval_dir,
               num_batches,
               timeout_secs=300):
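    """Evaluate the model repeatedly, loading checkpoints from train_dir."""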

    tf.compat.v1.logging.set_verbosity('INFO')
    if num_batches <= 0:
        raise ValueError(
            '`num_batches` must be greater than 0. Check that the batch size is '
            'no larger than the number of records in the eval set.')
    with tf.Graph().as_default():
        # Build a model identical to the one used in training
        build_graph_fn()
        # Define the summaries to write:
        global_step = tf.train.get_or_create_global_step()
        loss = tf.get_collection('loss')[0]
        perplexity = tf.get_collection('metrics/perplexity')[0]
        accuracy = tf.get_collection('metrics/accuracy')[0]
        eval_ops = tf.get_collection('eval_ops')

        logging_dict = {
            'Global Step': global_step,
            'Loss': loss,
            'Perplexity': perplexity,
            'Accuracy': accuracy
        }
        hooks = [
            EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),
            contrib_training.StopAfterNEvalsHook(num_batches),
            contrib_training.SummaryAtEndHook(eval_dir),
        ]
        # names_to_values = contrib_training.evaluate_once(
        #     checkpoint_path=train_dir,
        #     eval_ops=eval_ops,
        #     final_ops=logging_dict,
        #     hooks=hooks,
        #     config=None)
        # for name in names_to_values:
        #     print('Metric %s has value %f.' % (name, names_to_values[name]))
        contrib_training.evaluate_repeatedly(train_dir,
                                             eval_ops=eval_ops,
                                             hooks=hooks,
                                             eval_interval_secs=2,
                                             timeout=timeout_secs)
Example #5
def evaluate_model(config):
    """ Train the model using the passed in config """
    ###########################################################
    # Create the input pipeline
    ###########################################################
    with tf.name_scope('input_pipeline'):
        dataset = input_utils.get_dataset(config.datadir,
                                          config.dataset,
                                          config.datasubset,
                                          num_folds=config.fold_count,
                                          fold=config.fold,
                                          holdout=True)

        init_op, init_feed_dict, image = input_utils.get_data(
            config.dataset,
            dataset,
            config.batch_size,
            num_epochs=config.num_epochs,
            num_readers=config.num_readers)

        images = tf.train.batch([image],
                                config.batch_size,
                                num_threads=config.num_preprocessing_threads,
                                capacity=5 * config.batch_size)

    ###########################################################
    # Generate the model
    ###########################################################
    outputs = create_model(config, images, dataset)

    ###########################################################
    # Setup the evaluation metrics and summaries
    ###########################################################
    summaries = []
    metrics_map = {}
    for loss in tf.losses.get_losses():
        metrics_map[loss.op.name] = metrics.streaming_mean(loss)

    for metric in tf.get_collection(graph_utils.GraphKeys.METRICS):
        metrics_map[metric.op.name] = metrics.streaming_mean(metric)

    total_loss = tf.losses.get_total_loss()
    metrics_map[total_loss.op.name] = metrics.streaming_mean(total_loss)
    names_to_values, names_to_updates = metrics.aggregate_metric_map(
        metrics_map)

    # Create summaries of the metrics and print them to the screen
    for name, value in names_to_values.items():
        summary = tf.summary.scalar(name, value, collections=[])
        summaries.append(tf.Print(summary, [value], name))

    summaries.extend(layers.summarize_collection(tf.GraphKeys.MODEL_VARIABLES))
    summaries.extend(layers.summarize_collection(
        graph_utils.GraphKeys.METRICS))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.RNN_OUTPUTS))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.TRAINING_PARAMETERS))

    images = input_utils.reshape_images(images, config.dataset)
    tiled_images = image_utils.tile_images(images)
    summaries.append(tf.summary.image('input_batch', tiled_images))

    # Generate the canvases that lead to the final output image
    with tf.name_scope('canvases'):
        for step, canvas in enumerate(outputs):
            canvas = input_utils.reshape_images(canvas, config.dataset)
            tiled_images = image_utils.tile_images(canvas)
            summaries.append(
                tf.summary.image('step{0}'.format(step), tiled_images))

    summary_op = tf.summary.merge(summaries, name='summaries')

    ###########################################################
    # Begin evaluation
    ###########################################################
    checkpoint_path = FLAGS.checkpoint_path
    eval_ops = tf.group(*names_to_updates.values())
    hooks = [
        training.SummaryAtEndHook(log_dir=FLAGS.log_dir,
                                  summary_op=summary_op),
        training.StopAfterNEvalsHook(
            math.ceil(dataset.num_samples / float(config.batch_size)))
    ]

    eval_kwargs = {}
    eval_fn = training.evaluate_repeatedly
    if FLAGS.once:
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
        eval_fn = training.evaluate_once
    else:
        assert tf.gfile.IsDirectory(checkpoint_path), (
            'checkpoint path must be a directory when using loop evaluation')

    eval_fn(checkpoint_path, hooks=hooks, eval_ops=eval_ops, **eval_kwargs)
Example #6
def evaluate_model(config):
    """ Train the model using the passed in config """
    ###########################################################
    # Create the input pipeline
    ###########################################################
    with tf.name_scope('input_pipeline'):
        dataset = input_utils.get_dataset(config.datadir, config.dataset,
                                          config.datasubset)

        init_op, init_feed_dict, image, label = input_utils.get_data(
            config.dataset,
            dataset,
            config.batch_size,
            num_epochs=config.num_epochs,
            num_readers=config.num_readers)

        images, labels = tf.train.batch(
            [image, label],
            config.batch_size,
            num_threads=config.num_preprocessing_threads,
            capacity=5 * config.batch_size)

    ###########################################################
    # Generate the model
    ###########################################################
    outputs = create_model(config, images, dataset)
    tfprof.model_analyzer.print_model_analysis(tf.get_default_graph())

    ###########################################################
    # Setup the evaluation metrics and summaries
    ###########################################################
    summaries = []
    metrics_map = {}
    for metric in tf.get_collection(graph_utils.GraphKeys.METRICS):
        metrics_map[metric.op.name] = metrics.streaming_mean(metric)

    predictions = tf.argmax(outputs, 1)
    metrics_map['accuracy'] = metrics.streaming_accuracy(predictions, labels)
    metrics_map['recall_5'] = metrics.streaming_sparse_recall_at_k(
        outputs, tf.expand_dims(labels, 1), 5)

    names_to_values, names_to_updates = metrics.aggregate_metric_map(
        metrics_map)

    # Create summaries of the metrics and print them to the screen
    for name, value in names_to_values.items():
        summary = tf.summary.scalar(name, value, collections=[])
        summaries.append(tf.Print(summary, [value], name))

    summaries.extend(layers.summarize_collection(
        graph_utils.GraphKeys.METRICS))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.QUANTIZED_VARIABLES))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.TRAINING_PARAMETERS))

    tiled_images = image_utils.tile_images(images)
    summaries.append(tf.summary.image('input_batch', tiled_images))

    summary_op = tf.summary.merge(summaries, name='summaries')

    ###########################################################
    # Begin evaluation
    ###########################################################
    checkpoint_path = FLAGS.checkpoint_path
    eval_ops = tf.group(*names_to_updates.values())
    scaffold = tf.train.Scaffold(init_op, init_feed_dict)
    hooks = [
        training.SummaryAtEndHook(log_dir=FLAGS.log_dir, summary_op=summary_op),
        training.StopAfterNEvalsHook(
            math.ceil(dataset.num_samples / float(config.batch_size)))
    ]

    eval_kwargs = {}
    eval_fn = training.evaluate_repeatedly
    if FLAGS.once:
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
        eval_fn = training.evaluate_once
    else:
        assert tf.gfile.IsDirectory(checkpoint_path), (
            'checkpoint path must be a directory when using loop evaluation')

        # On TensorFlow master fd87896 fixes this, but for now just set a very large number
        eval_kwargs['max_number_of_evaluations'] = sys.maxsize

    eval_fn(checkpoint_path,
            scaffold=scaffold,
            hooks=hooks,
            eval_ops=eval_ops,
            **eval_kwargs)