Example No. 1
def _multiclass_metrics(predictions, labels, weights):
    metrics = dict()
    logits = predictions["scores"]
    classes = math_ops.argmax(logits, 1)
    metrics["accuracy"] = metrics_lib.streaming_accuracy(
        classes, labels, weights)
    return metrics
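Every snippet on this page follows the same contract: streaming_accuracy returns a (value, update_op) pair whose running counters live in TensorFlow local variables, so callers must initialize local variables and run the update op once per batch. A minimal sketch of that pattern (TF 1.x; the tensors are made up for illustration):

import tensorflow as tf
import tensorflow.contrib.metrics as tcm

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])

# (value, update_op): the update op folds the current batch into the running
# total/count, while `accuracy` reads the ratio accumulated so far
accuracy, update_op = tcm.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)        # one batch: 3 of 4 correct
    print(sess.run(accuracy))  # 0.75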
Example No. 2
def main(_):
    assert FLAGS.train_dir, "--train_dir is required."
    if tf.gfile.Exists(FLAGS.summaries_dir):
        tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
    tf.gfile.MakeDirs(FLAGS.summaries_dir)

    config = configuration.Config()

    dataset_eval = loader.get_split(FLAGS.split_name,
                                    dataset_dir=FLAGS.data_dir)
    if FLAGS.preprocess_abs:
        preprocess_fn = tf.abs
    else:
        preprocess_fn = None

    # whether the model expects 2-D input
    is_2D = common.is_2D(FLAGS.model)

    series, labels, labels_one_hot = loader.load_batch(
        dataset_eval,
        batch_size=config.batch_size,
        is_2D=is_2D,
        preprocess_fn=preprocess_fn)

    # Build lazy model
    model = common.convert_name_to_instance(FLAGS.model, config, 'eval')

    endpoints = model.build(inputs=series, is_training=False)
    predictions = tf.to_int64(tf.argmax(endpoints.logits, 1))

    slim.get_or_create_global_step()

    # Choose the metrics to compute:
    names_to_values, names_to_updates = metrics.aggregate_metric_map({
        'accuracy':
        metrics.streaming_accuracy(predictions, labels),
        'precision':
        metrics.streaming_precision(predictions, labels),
        'recall':
        metrics.streaming_recall(predictions, labels),
    })

    # Create the summary ops such that they also print out to std output:
    summary_ops = []
    for metric_name, metric_value in names_to_values.items():
        op = tf.summary.scalar(metric_name, metric_value)
        op = tf.Print(op, [metric_value], metric_name)
        summary_ops.append(op)

    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=FLAGS.train_dir,
        logdir=FLAGS.summaries_dir,
        eval_op=list(names_to_updates.values()),
        num_evals=min(FLAGS.num_batches, dataset_eval.num_samples),
        eval_interval_secs=FLAGS.eval_interval_secs,
        max_number_of_evaluations=FLAGS.num_of_steps,
        summary_op=tf.summary.merge(summary_ops),
        session_config=config.session_config,
    )
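For reference, aggregate_metric_map (used above) does nothing more than split a dict of (value, update_op) pairs into two parallel dicts keyed by metric name; a sketch of the equivalent logic:

def aggregate_metric_map(names_to_tuples):
    # {'name': (value_op, update_op)} -> ({'name': value_op}, {'name': update_op})
    metric_names = list(names_to_tuples.keys())
    value_ops, update_ops = zip(*(names_to_tuples[n] for n in metric_names))
    return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))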
Example No. 3
    def test_setup(self):
        # Load reader
        with tf.name_scope("create_inputs"):
            reader = ImageReader(self.conf.data_dir, self.conf.valid_data_list,
                                 None, False, False, self.conf.ignore_label,
                                 IMG_MEAN, self.coord)
            image, label = reader.image, reader.label  # [h, w, 3 or 1]
        # Add one batch dimension [1, h, w, 3 or 1]
        self.image_batch = tf.expand_dims(image, axis=0)
        self.label_batch = tf.expand_dims(label, axis=0)

        # Create network
        if self.conf.encoder_name not in ['res101', 'res50', 'deeplab']:
            print('encoder_name ERROR!')
            print("Please input: res101, res50, or deeplab")
            sys.exit(-1)
        elif self.conf.encoder_name == 'deeplab':
            net = Deeplab_v2(self.image_batch, self.conf.num_classes, False)
        else:
            net = ResNet_segmentation(self.image_batch, self.conf.num_classes,
                                      False, self.conf.encoder_name)

        # predictions
        raw_output = net.outputs
        raw_output = tf.image.resize_bilinear(
            raw_output, tf.shape(self.image_batch)[1:3])
        raw_output = tf.argmax(raw_output, axis=3)
        pred = tf.expand_dims(raw_output, axis=3)
        self.pred = tf.reshape(pred, [-1])
        # labels
        gt = tf.reshape(self.label_batch, [-1])
        # Ignoring all labels greater than or equal to n_classes.
        temp = tf.less_equal(gt, self.conf.num_classes - 1)
        weights = tf.cast(temp, tf.int32)

        # fix for tf 1.3.0
        gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))

        # Pixel accuracy
        self.accu, self.accu_update_op = tcm.streaming_accuracy(
            self.pred, gt, weights=weights)

        # mIoU
        self.mIoU, self.mIou_update_op = tcm.streaming_mean_iou(
            self.pred, gt, self.conf.num_classes, weights)

        # confusion matrix
        self.confusion_matrix = tcm.confusion_matrix(
            self.pred, gt, num_classes=self.conf.num_classes, weights=weights)

        # Loader for loading the checkpoint
        self.loader = tf.train.Saver(var_list=tf.global_variables())
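The less_equal/weights idiom above is how these segmentation examples exclude "ignore" pixels (e.g. the 255 void label in PASCAL VOC): out-of-range labels get weight 0 so they never enter the accuracy counters, and tf.where clamps them back into the valid id range so the metric op never sees an out-of-bounds class. A small self-contained sketch, assuming 3 classes and made-up pixel vectors:

import tensorflow as tf
import tensorflow.contrib.metrics as tcm

num_classes = 3
gt = tf.constant([0, 2, 255, 1])                     # 255 = ignore label
pred = tf.constant([0, 2, 0, 0])

valid = tf.less_equal(gt, num_classes - 1)           # mask out ignore pixels
weights = tf.cast(valid, tf.int32)                   # weight 0 -> not counted
gt_clamped = tf.where(valid, gt, tf.zeros_like(gt))  # keep ids in range

acc, acc_update = tcm.streaming_accuracy(pred, gt_clamped, weights=weights)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(acc_update)
    print(sess.run(acc))  # 2 correct of 3 valid pixels -> ~0.667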
Example No. 4
 def _compute_accuracy(logits, targets, weights=None):
   if self._n_classes > 2:
     _, predictions = nn.top_k(logits, 1)
   else:
     predictions = array_ops.reshape(logits, [-1])
     predictions = math_ops.greater(predictions,
                                    array_ops.zeros_like(predictions))
     targets = array_ops.reshape(targets, [-1])
   return metrics_lib.streaming_accuracy(
       math_ops.to_int32(predictions), math_ops.to_int32(targets), weights)
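_compute_accuracy is excerpted from inside a class (hence self._n_classes). Its binary branch thresholds raw logits at zero rather than sigmoid probabilities at 0.5; the two are equivalent because sigmoid(x) > 0.5 exactly when x > 0. A quick check of that equivalence (sketch, made-up logits):

import tensorflow as tf

logits = tf.constant([-1.2, 0.3, 2.0, -0.1])
via_logits = tf.greater(logits, 0.0)
via_sigmoid = tf.greater(tf.sigmoid(logits), 0.5)

with tf.Session() as sess:
    print(sess.run(tf.reduce_all(tf.equal(via_logits, via_sigmoid))))  # True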
Example No. 6
    def add_classification_output_layer(self,
                                        last_hidden_layer,
                                        gt_labels,
                                        num_classes,
                                        corpus_tag,
                                        task_tag,
                                        loss_weight=1):
        with tf.variable_scope("output_layer_%s" % task_tag) as layer_scope:
            last_out = fully_connected(
                last_hidden_layer,
                num_classes,
                activation_fn=tf.identity,
                weights_regularizer=self.l1_l2_regularizer,
                scope=layer_scope)
            self.predictions = tf.nn.softmax(last_out)

        with tf.name_scope("%s_%s_stats" % (corpus_tag, task_tag)):
            loss = loss_weight * tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=gt_labels, logits=last_out))
            # utils.variable_summaries(loss, "loss", corpus_tag)
            self.variable_summaries(loss, "loss", task_tag)

            tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

            str_accu, _ = streaming_accuracy(
                tf.argmax(last_out, 1),
                gt_labels,
                name="stracc_%s" % corpus_tag,
                updates_collections=tf.GraphKeys.UPDATE_OPS)
            str_accu = 100 * str_accu
            # utils.variable_summaries(str_accu, "streaming_accuracy", corpus_tag)
            self.variable_summaries(str_accu, "streaming_accuracy", task_tag)

            updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.streaming_accu_op = control_flow_ops.with_dependencies(
                updates_op, str_accu)

            correct_prediction = tf.equal(tf.argmax(last_out, 1), gt_labels)

            accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                              tf.float32)) * 100
            a = tf.cast(tf.argmax(last_out, 1), tf.float32)
            b = tf.one_hot(gt_labels, num_classes)

            # print(tf.argmax(last_out, 1).dtype.name)
            # print(gt_labels.dtype.name)
            # TODO: double check
            auc = tf.metrics.auc(b, self.predictions)
            # auc = tf.contrib.metrics.streaming_auc(a, b)
            # utils.variable_summaries(accuracy, "accuracy", corpus_tag)
            self.variable_summaries(accuracy, "accuracy", task_tag)
            self.accuracy = accuracy
            self.auc = auc
Example No. 8
def get_eval_metric_ops(problem_type, prediction_type, sequence_length,
                        prediction_dict, labels):
  """Returns eval metric ops for given `problem_type` and `prediction_type`.

  Args:
    problem_type: `ProblemType.CLASSIFICATION` or
      `ProblemType.LINEAR_REGRESSION`.
    prediction_type: `PredictionType.SINGLE_VALUE` or
      `PredictionType.MULTIPLE_VALUE`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    prediction_dict: A dict of prediction tensors.
    labels: The label `Tensor`.

  Returns:
    A `dict` mapping strings to the result of calling the metric_fn.
  """
  eval_metric_ops = {}
  if problem_type == constants.ProblemType.CLASSIFICATION:
    # Multi value classification
    if prediction_type == PredictionType.MULTIPLE_VALUE:
      mask_predictions, mask_labels = mask_activations_and_labels(
          prediction_dict[prediction_key.PredictionKey.CLASSES], labels,
          sequence_length)
      eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
          predictions=mask_predictions, labels=mask_labels)
    # Single value classification
    elif prediction_type == PredictionType.SINGLE_VALUE:
      eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
          predictions=prediction_dict[prediction_key.PredictionKey.CLASSES],
          labels=labels)
  elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
    # Multi value regression
    if prediction_type == PredictionType.MULTIPLE_VALUE:
      pass
    # Single value regression
    elif prediction_type == PredictionType.SINGLE_VALUE:
      pass
  return eval_metric_ops
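mask_activations_and_labels is a contrib helper that drops the padded tail of each sequence before the accuracy is computed, so padding positions never count as errors. Its effect is roughly the following (a sketch built on tf.sequence_mask/tf.boolean_mask, not the helper's actual implementation):

import tensorflow as tf

preds = tf.constant([[1, 2, 0], [3, 0, 0]])    # padded [batch, time]
labels = tf.constant([[1, 2, 9], [3, 9, 9]])   # 9 = padding
lengths = tf.constant([2, 1])                  # true sequence lengths

mask = tf.sequence_mask(lengths, maxlen=3)     # True where valid
masked_preds = tf.boolean_mask(preds, mask)    # [1, 2, 3]
masked_labels = tf.boolean_mask(labels, mask)  # [1, 2, 3]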
Example No. 10
    def add_multiple_classification_output_layer(self,
                                                 last_hidden_layer,
                                                 gt_labels,
                                                 num_classes,
                                                 corpus_tag,
                                                 task_tag,
                                                 loss_weight=1):
        with tf.variable_scope("output_layer_%s" % task_tag) as layer_scope:
            output_dim = gt_labels.shape[1].value
            last_out = fully_connected(
                last_hidden_layer,
                output_dim,
                activation_fn=tf.identity,
                weights_regularizer=self.l1_l2_regularizer,
                scope=layer_scope)
            self.predictions = tf.nn.sigmoid(last_out)

        with tf.name_scope("%s_%s_stats" % (corpus_tag, task_tag)):
            loss = loss_weight * tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_labels,
                                                        logits=last_out))

            self.variable_summaries(loss, "loss", task_tag)

            tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

            str_accu, _ = streaming_accuracy(
                # round the sigmoid probabilities to hard 0/1 predictions
                tf.rint(self.predictions),
                gt_labels,
                name="stracc_%s" % corpus_tag,
                updates_collections=tf.GraphKeys.UPDATE_OPS)

            str_accu = 100 * str_accu
            # utils.variable_summaries(str_accu, "streaming_accuracy", corpus_tag)
            self.variable_summaries(str_accu, "streaming_accuracy", task_tag)

            updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.streaming_accu_op = control_flow_ops.with_dependencies(
                updates_op, str_accu)

            correct_prediction = tf.equal(tf.rint(self.predictions),
                                          gt_labels)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                              tf.float32)) * 100

            auc = tf.metrics.auc(gt_labels, self.predictions)
            # utils.variable_summaries(accuracy, "accuracy", corpus_tag)
            self.variable_summaries(accuracy, "accuracy", task_tag)
            self.accuracy = accuracy
            self.auc = auc
Example No. 11
def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
  """Returns a dictionary of evaluation metric ops for logistic regression.

  Args:
    labels: The labels `Tensor`, or a dict with only one `Tensor` keyed by name.
    predictions: The predictions `Tensor`.
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics.

  Returns:
    A dict of metric results keyed by name.
  """
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor = list(labels.values())[0]

  metrics = {}
  metrics[metric_key.MetricKey.PREDICTION_MEAN] = metrics_lib.streaming_mean(
      predictions)
  metrics[metric_key.MetricKey.LABEL_MEAN] = metrics_lib.streaming_mean(
      labels_tensor)
  # Also include the streaming mean of the label as an accuracy baseline, as
  # a reminder to users.
  metrics[metric_key.MetricKey.ACCURACY_BASELINE] = metrics_lib.streaming_mean(
      labels_tensor)

  metrics[metric_key.MetricKey.AUC] = metrics_lib.streaming_auc(
      labels=labels_tensor, predictions=predictions)

  for threshold in thresholds:
    predictions_at_threshold = math_ops.cast(
        math_ops.greater_equal(predictions, threshold),
        dtypes.float32,
        name='predictions_at_threshold_%f' % threshold)
    metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
        metrics_lib.streaming_accuracy(labels=labels_tensor,
                                       predictions=predictions_at_threshold))
    # Precision for positive examples.
    metrics[metric_key.MetricKey.PRECISION_MEAN % threshold] = (
        metrics_lib.streaming_precision(labels=labels_tensor,
                                        predictions=predictions_at_threshold))
    # Recall for positive examples.
    metrics[metric_key.MetricKey.RECALL_MEAN % threshold] = (
        metrics_lib.streaming_recall(labels=labels_tensor,
                                     predictions=predictions_at_threshold))

  return metrics
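Each threshold in the loop yields an independent hard classifier whose accuracy, precision, and recall are accumulated separately. In isolation, at a single threshold of 0.5 (a sketch with made-up probabilities):

import tensorflow as tf
import tensorflow.contrib.metrics as tcm

probs = tf.constant([0.1, 0.6, 0.8, 0.4])
labels = tf.constant([0.0, 1.0, 1.0, 1.0])

preds_at_05 = tf.cast(tf.greater_equal(probs, 0.5), tf.float32)  # [0, 1, 1, 0]

acc, acc_up = tcm.streaming_accuracy(predictions=preds_at_05, labels=labels)
prec, prec_up = tcm.streaming_precision(predictions=preds_at_05, labels=labels)
rec, rec_up = tcm.streaming_recall(predictions=preds_at_05, labels=labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run([acc_up, prec_up, rec_up])
    print(sess.run([acc, prec, rec]))  # [0.75, 1.0, 0.6667]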
Example No. 12
def evaluate():
    with tf.Graph().as_default():
        config = tf.ConfigProto(device_count={'GPU': 0})

        images, labels = utils.load_batch(shards=VAL_SHARDS,
                                          batch_size=FLAGS.batch_size,
                                          train=False,
                                          crop=False,
                                          flip=False)

        predictions = alexnet.AlexNet(images,
                                      batch_size=FLAGS.batch_size).model
        prediction = tf.to_int64(tf.argmax(predictions, 1))  # index of the largest logit

        mse_op = metrics.streaming_mean_squared_error(prediction, labels)
        rmse_op = metrics.streaming_root_mean_squared_error(prediction, labels)
        accuracy_op = metrics.streaming_accuracy(prediction, labels)
        precision_op = metrics.streaming_precision(prediction, labels)

        metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
            'mse':
            mse_op,
            'rmse':
            rmse_op,
            'accuracy':
            accuracy_op,
            'precision':
            precision_op,
        })

        for metric_name, metric_value in metrics_to_values.items():
            tf.summary.scalar(metric_name, metric_value)

        slim.evaluation.evaluation_loop('',
                                        FLAGS.trainlog_dir,
                                        FLAGS.evallog_dir,
                                        num_evals=FLAGS.num_evals,
                                        eval_op=list(
                                            metrics_to_updates.values()),
                                        eval_interval_secs=5,
                                        session_config=config)
Example No. 13
    def build_test_metrics(self):

        raw_labels = multihot_labels(self.mmsis)
        mask = tf.to_float(tf.equal(tf.reduce_sum(raw_labels, 1), 1))
        labels = tf.to_int32(tf.argmax(raw_labels, 1))

        predictions = tf.to_int32(tf.argmax(self.prediction, 1))

        metrics_map = {
            '%s/Test-accuracy' % self.name:
            metrics.streaming_accuracy(predictions, labels, weights=mask)
        }

        if self.metrics == 'all':
            for i, cls in enumerate(self.classes):
                cls_name = cls.replace(' ', '-')
                trues = tf.to_int32(tf.equal(labels, i))
                preds = tf.to_int32(tf.equal(predictions, i))
                recall = metrics.streaming_recall(preds, trues, weights=mask)
                precision = metrics.streaming_precision(preds, trues,
                                                        weights=mask)
                metrics_map["%s/Class-%s-Precision" %
                            (self.name, cls_name)] = precision
                metrics_map["%s/Class-%s-Recall" %
                            (self.name, cls_name)] = recall
                metrics_map["%s/Class-%s-F1-Score" %
                            (self.name, cls_name)] = f1(recall, precision)
                metrics_map["%s/Class-%s-ROC-AUC" %
                            (self.name, cls_name)] = metrics.streaming_auc(
                                self.prediction[:, i], trues, weights=mask)

        return metrics.aggregate_metric_map(metrics_map)
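The f1 helper used above is not shown in the snippet. One plausible way to build it from the two (value, update_op) pairs, so it fits into the same metric map (a hypothetical sketch, not the project's real implementation):

import tensorflow as tf

def f1(recall, precision):
    # combine two (value, update_op) metric pairs into an F1 pair
    r_value, r_update = recall
    p_value, p_update = precision
    value = 2 * p_value * r_value / tf.maximum(p_value + r_value, 1e-8)
    return value, tf.group(p_update, r_update)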
Example No. 14
    def add_classification_output_layer(self, last_hidden_layer, gt_labels, num_classes, corpus_tag, task_tag,
                                        loss_weight=1):
        # returns loss op
        with tf.variable_scope("output_layer_%s" % task_tag) as layer_scope:
            last_out = fully_connected(last_hidden_layer, num_classes, activation_fn=tf.identity,
                                       weights_regularizer=l1_l2_regularizer(self.l1_reg, self.l2_reg),
                                       scope=layer_scope)
            self.predictions = tf.nn.softmax(last_out)

        with tf.name_scope("%s_loss_%s" % (corpus_tag, task_tag)):
            loss = loss_weight * tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=gt_labels, logits=last_out))
            utils.variable_summaries(loss, "loss", corpus_tag)
            tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        with tf.name_scope('%s_accuracy_%s' % (corpus_tag, task_tag)):
            # correct_prediction = tf.equal(tf.argmax(last_out, 1), gt_labels)
            # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100
            accuracy, _ = streaming_accuracy(tf.argmax(last_out, 1), gt_labels, name="acc_%s" % corpus_tag,
                                             updates_collections=tf.GraphKeys.UPDATE_OPS)

            utils.variable_summaries(accuracy, "accuracy", corpus_tag)

            updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.calculate_accuracy_op = control_flow_ops.with_dependencies(updates_op, accuracy)
Example No. 15
def evaluate_model(config):
    """ Train the model using the passed in config """
    ###########################################################
    # Create the input pipeline
    ###########################################################
    with tf.name_scope('input_pipeline'):
        dataset = input_utils.get_dataset(config.datadir, config.dataset,
                                          config.datasubset)

        init_op, init_feed_dict, image, label = input_utils.get_data(
            config.dataset,
            dataset,
            config.batch_size,
            num_epochs=config.num_epochs,
            num_readers=config.num_readers)

        images, labels = tf.train.batch(
            [image, label],
            config.batch_size,
            num_threads=config.num_preprocessing_threads,
            capacity=5 * config.batch_size)

    ###########################################################
    # Generate the model
    ###########################################################
    outputs = create_model(config, images, dataset)
    tfprof.model_analyzer.print_model_analysis(tf.get_default_graph())

    ###########################################################
    # Setup the evaluation metrics and summaries
    ###########################################################
    summaries = []
    metrics_map = {}
    for metric in tf.get_collection(graph_utils.GraphKeys.METRICS):
        metrics_map[metric.op.name] = metrics.streaming_mean(metric)

    predictions = tf.argmax(outputs, 1)
    metrics_map['accuracy'] = metrics.streaming_accuracy(predictions, labels)
    metrics_map['recall_5'] = metrics.streaming_sparse_recall_at_k(
        outputs, tf.expand_dims(labels, 1), 5)

    names_to_values, names_to_updates = metrics.aggregate_metric_map(
        metrics_map)

    # Create summaries of the metrics and print them to the screen
    for name, value in names_to_values.items():
        summary = tf.summary.scalar(name, value, collections=[])
        summaries.append(tf.Print(summary, [value], name))

    summaries.extend(layers.summarize_collection(
        graph_utils.GraphKeys.METRICS))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.QUANTIZED_VARIABLES))
    summaries.extend(
        layers.summarize_collection(graph_utils.GraphKeys.TRAINING_PARAMETERS))

    tiled_images = image_utils.tile_images(images)
    summaries.append(tf.summary.image('input_batch', tiled_images))

    summary_op = tf.summary.merge(summaries, name='summaries')

    ###########################################################
    # Begin evaluation
    ###########################################################
    checkpoint_path = FLAGS.checkpoint_path
    eval_ops = tf.group(*names_to_updates.values())
    scaffold = tf.train.Scaffold(init_op, init_feed_dict)
    hooks = [
        training.SummaryAtEndHook(FLAGS.log_dir, summary_op),
        training.StopAfterNEvalsHook(
            math.ceil(dataset.num_samples / float(config.batch_size)))
    ]

    eval_kwargs = {}
    eval_fn = training.evaluate_repeatedly
    if FLAGS.once:
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
        eval_fn = training.evaluate_once
    else:
        assert tf.gfile.IsDirectory(checkpoint_path), (
            'checkpoint path must be a directory when using loop evaluation')

        # On TensorFlow master fd87896 fixes this, but for now just set a very large number
        eval_kwargs['max_number_of_evaluations'] = sys.maxsize

    eval_fn(checkpoint_path,
            scaffold=scaffold,
            hooks=hooks,
            eval_ops=eval_ops,
            **eval_kwargs)
Example No. 16
import tensorflow as tf
import tensorflow.contrib.metrics as tcm

# Input data
labels = tf.random_uniform(shape=[3], minval=1, maxval=10, dtype=tf.int32)
predictions = tf.random_uniform(shape=[3], minval=1, maxval=10, dtype=tf.int32)
predictions2 = tf.random_uniform(shape=[3],
                                 minval=1,
                                 maxval=10,
                                 dtype=tf.int32)

# Accuracy and MAE (https://en.wikipedia.org/wiki/Mean_absolute_error)
accuracy, update_op_acc = tcm.streaming_accuracy(predictions,
                                                 labels,
                                                 name='prediction1')
# When evaluating the same metric multiple times on different inputs,
# each metric must get its own scope to avoid accumulating the results together:
accuracy2, update_op_acc2 = tcm.streaming_accuracy(predictions2,
                                                   labels,
                                                   name='prediction2')
error, update_op_error = tcm.streaming_mean_absolute_error(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())

    for batch in range(10):
        accuracy_value, error_value, accuracy_value2 = sess.run(
            [update_op_acc, update_op_error, update_op_acc2])
        print("iterator: {}, accuracy1 : {}, error1: {}, accuracy2: {}".format(
            batch, accuracy_value, error_value, accuracy_value2))
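Because each metric's counters live in local variables under its name scope, a metric can also be reset between evaluation rounds by re-initializing just those variables. A sketch that filters tf.local_variables() by name (the 'prediction1' scope comes from the snippet above):

# zero out only the accumulators belonging to the 'prediction1' metric
reset_vars = [v for v in tf.local_variables() if 'prediction1' in v.name]
reset_op = tf.variables_initializer(reset_vars)
# sess.run(reset_op)  # run before the next evaluation pass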
Example No. 17
    def test(self):

        # Session context
        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:

            # Resize the input
            shape = tf.shape(self.images_tensor)
            h, w = (tf.maximum(self.conf.input_size, shape[1]),
                    tf.maximum(self.conf.input_size, shape[2]))
            img = tf.image.resize_nearest_neighbor(self.images_tensor, [h, w])

            # logits
            logits = self.net.fit({"data": img})
            # Prediction
            raw_output_up = tf.image.resize_bilinear(logits,
                                                     size=[h, w],
                                                     align_corners=True)
            raw_output_up = tf.image.crop_to_bounding_box(
                raw_output_up, 0, 0, shape[1], shape[2])
            raw_output_up = tf.argmax(raw_output_up, axis=3)
            prediction = tf.expand_dims(raw_output_up, axis=3)

            # Evaluation
            pred = tf.reshape(prediction, [-1])
            gt = tf.reshape(self.labels_tensor, [-1])
            temp = tf.less_equal(gt, self.conf.num_classes - 1)
            weights = tf.cast(temp, tf.int32)
            gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))
            acc, acc_update_op = tcm.streaming_accuracy(pred,
                                                        gt,
                                                        weights=weights)
            # confusion matrix
            confusion_matrix_tensor = tcm.confusion_matrix(
                pred, gt, num_classes=self.conf.num_classes, weights=weights)

            # Run the initializers
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # Saver
            saver = tf.train.Saver(var_list=tf.global_variables(),
                                   max_to_keep=100)

            # Load the model checkpoint
            ckpt = tf.train.get_checkpoint_state(self.conf.checkpoints_path)
            if ckpt and ckpt.model_checkpoint_path:
                print('test from {}'.format(ckpt.model_checkpoint_path))
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise ("请先训练模型..., Train.py first")

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            # test
            confusion_matrix = np.zeros(
                (self.conf.num_classes, self.conf.num_classes), dtype=int)
            for i in range(self.conf.test_num_steps // self.conf.batch_size):
                start = time.time()
                pred, _, c_matrix = sess.run(
                    [prediction, acc_update_op, confusion_matrix_tensor])
                confusion_matrix += c_matrix
                _diff_time = time.time() - start
                print('{}: cost {:.0f}ms'.format(i, _diff_time * 1000))
            # Overall results
            self.compute_IoU_per_class(confusion_matrix)
            print("Pascal VOC 2012 validation dataset pixel accuracy: " +
                  str(sess.run(acc)))

            coord.request_stop()
            coord.join(threads)
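compute_IoU_per_class is not shown in this example; the standard per-class IoU computed from an accumulated confusion matrix looks roughly like this (a NumPy sketch, not the project's actual helper):

import numpy as np

def iou_per_class(confusion_matrix):
    # per-class intersection-over-union from a [C, C] confusion matrix
    cm = confusion_matrix.astype(np.float64)
    intersection = np.diag(cm)                    # true positives per class
    union = cm.sum(axis=0) + cm.sum(axis=1) - intersection
    return intersection / np.maximum(union, 1)    # guard empty classes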
Example No. 18
def streaming_accuracy(name, predictions, labels):
    # [1] selects the update op of the (value, update_op) pair, so evaluating
    # the summary both updates and reports the running accuracy
    tf.summary.scalar(
        name,
        tfm.streaming_accuracy(predictions,
                               labels,
                               name='stream/{}'.format(name))[1])
Example No. 19
    def __init__(self, data, conf):

        self.conf = conf
        # model
        self.weight_decay = self.conf.weight_decay
        self.is_training = self.conf.is_training

        # data
        self.data = data
        self.num_classes = self.data.num_classes
        self.image_height = self.data.image_height
        self.image_width = self.data.image_width
        self.image_channel = self.data.image_channel
        self.label_channel = self.data.label_channel
        self.batch_size = self.data.batch_size

        # Learning-rate and global step
        self.learning_rate_tensor = tf.convert_to_tensor(
            self.conf.learning_rate)
        self.global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0),
            trainable=False,
            dtype=tf.int32)
        # add summary
        tf.summary.scalar('learning_rate', self.learning_rate_tensor)

        # inputs
        self.images_tensor, self.labels_tensor = self.data.get_next_data()

        # output
        logits = self._network(self.images_tensor)

        self.prediction = tf.cast(
            tf.expand_dims(tf.argmax(logits, axis=3), axis=3), tf.uint8)

        # Evaluation
        pred = tf.reshape(self.prediction, [-1])
        gt = tf.reshape(self.labels_tensor, [-1])
        temp = tf.less_equal(gt, self.num_classes - 1)
        weights = tf.cast(temp, tf.int32)
        gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))
        self.acc, self.acc_update_op = tcm.streaming_accuracy(pred,
                                                              gt,
                                                              weights=weights)
        # confusion matrix
        self.confusion_matrix = tcm.confusion_matrix(
            pred, gt, num_classes=self.num_classes, weights=weights)

        # loss
        self.loss = self._loss(logits)

        # save moving average
        variables_averages_op = tf.train.ExponentialMovingAverage(
            self.conf.moving_average_decay,
            self.global_step).apply(tf.trainable_variables())
        # Optimization
        with tf.control_dependencies([variables_averages_op]):
            self.train_op = self._get_train_op()

        self.summary_op = tf.summary.merge_all()

Example No. 20
File: model.py Project: ml-lab/PNC
    def _model_fn(features, labels, mode):
        """

        :param features:
        :param labels:
        :param mode:
        :return:
        """

        # Pop the name of the signal.
        if 'FN' in features:
            names = features.pop('FN')

        if 'FT' in features:
            labels = features.pop('FT')

        # Define the type of the inputs (they are all numeric).
        columns = [
            layers.real_valued_column(key) for key, value in features.items()
        ]
        inputs = layers.input_from_feature_columns(features, columns)

        # Declare the hidden_layers variable.
        hidden_layers = None

        # Iterate all over the hidden units.
        for unit in hidden_units:
            # Create a new hidden layer.
            hidden_layers = tf.layers.dense(
                inputs=inputs if hidden_layers is None else hidden_layers,
                activation=tf.nn.relu,
                units=unit,
            )

        # Create a dropout layer.
        dropout_layer = layers.dropout(inputs=hidden_layers,
                                       keep_prob=1.0 - dropout)

        # Create the logits layer.
        logits = tf.layers.dense(inputs=dropout_layer,
                                 activation=None,
                                 units=2)

        if mode in (ModeKeys.PREDICT, ModeKeys.EVAL):
            # Calculate the probabilities.
            probabilities = tf.nn.softmax(logits)
            # And their indexes.
            predictions = tf.argmax(logits, 1)

        if mode in (ModeKeys.EVAL, ModeKeys.TRAIN):
            # Convert the labels in the one_hot format.
            onehot_labels = tf.one_hot(indices=labels, depth=2)
            # Define the class weights.
            class_weights = tf.constant(weights)
            # Deduce weights for batch samples based on their true label.
            reduced_weights = tf.reduce_sum(class_weights * onehot_labels,
                                            axis=1)
            # Compute your (unweighted) softmax cross entropy loss.
            unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(
                labels=onehot_labels, logits=logits)
            # Apply the weights, relying on broadcasting of the multiplication.
            weighted_losses = unweighted_losses * reduced_weights
            # Reduce the result to get your final loss.
            loss = tf.reduce_mean(weighted_losses)

        if mode == ModeKeys.PREDICT:

            # Convert predicted_indices back into strings
            predictions = {
                'classes': predictions,
                'scores': probabilities,
            }

            # export_outputs = {
            #     'prediction': tf.estimator.export.PredictOutput(predictions)
            # }

            # return tf.estimator.EstimatorSpec(
            #     mode=mode,
            #     predictions=predictions,
            #     # export_outputs=export_outputs,
            # )

            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions,
            )

        if mode == ModeKeys.TRAIN:
            # Define the training rule.
            train_op = layers.optimize_loss(
                loss=loss,
                global_step=framework.get_global_step(),
                learning_rate=learning_rate,
                optimizer='SGD')

            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op)

        if mode == ModeKeys.EVAL:

            # Define the metrics to show up in the evaluation process.
            eval_metric_ops = {
                'accuracy':
                metrics.streaming_accuracy(predictions=predictions,
                                           labels=labels),
                'auroc':
                metrics.streaming_auc(predictions=predictions, labels=labels),
                'recall':
                metrics.streaming_recall(predictions=predictions,
                                         labels=labels),
                'precision':
                metrics.streaming_precision(predictions=predictions,
                                            labels=labels),
                'TP':
                metrics.streaming_true_positives(predictions=predictions,
                                                 labels=labels),
                'FN':
                metrics.streaming_false_negatives(predictions=predictions,
                                                  labels=labels),
                'FP':
                metrics.streaming_false_positives(predictions=predictions,
                                                  labels=labels),
                'TN':
                metrics.streaming_true_negatives(predictions=predictions,
                                                 labels=labels),
                #'gaccuracy' : metrics.streaming_accuracy(predictions=GP, labels=GL)
            }

            return tf.estimator.EstimatorSpec(mode=mode,
                                              predictions=predictions,
                                              loss=loss,
                                              eval_metric_ops=eval_metric_ops)
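The per-example weighting in the EVAL/TRAIN branch works because reduce_sum(class_weights * onehot_labels, axis=1) picks out the weight of each example's true class. In isolation (a sketch with made-up weights):

import tensorflow as tf

class_weights = tf.constant([1.0, 5.0])   # up-weight the positive class
labels = tf.constant([0, 1, 1])
onehot = tf.one_hot(labels, depth=2)      # [[1,0], [0,1], [0,1]]
per_example_w = tf.reduce_sum(class_weights * onehot, axis=1)  # [1., 5., 5.]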
Example No. 21
 def _accuracy_metric(predictions, target, weights=None):
   threshold_predictions = math_ops.to_float(
       math_ops.greater_equal(predictions, threshold))
   return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
                                         labels=target,
                                         weights=weights)
Example No. 22
 def _accuracy_metric(predictions, labels, weights=None):
   threshold_predictions = math_ops.to_float(
       math_ops.greater_equal(predictions, threshold))
   return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
                                         labels=labels,
                                         weights=weights)
Example No. 23
def main(argv=None):
    filename_queue = tf.train.string_input_producer([FLAGS.test_data_path],
                                                    num_epochs=1)
    image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(
        filename_queue)

    image_batch_tensor = tf.expand_dims(image, axis=0)
    annotation_batch_tensor = tf.expand_dims(annotation, axis=0)

    input_image_shape = tf.shape(image_batch_tensor)
    image_height_width = input_image_shape[1:3]
    image_height_width_float = tf.to_float(image_height_width)
    image_height_width_multiple = tf.to_int32(
        tf.round(image_height_width_float / 32) * 32)

    image_batch_tensor = tf.image.resize_images(image_batch_tensor,
                                                image_height_width_multiple)
    annotation_batch_tensor = tf.image.resize_nearest_neighbor(
        images=annotation_batch_tensor, size=image_height_width)

    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    logits = model.model(image_batch_tensor, is_training=False)
    pred = tf.argmax(logits, axis=3)
    pred = tf.expand_dims(pred, 3)
    pred = tf.image.resize_nearest_neighbor(images=pred,
                                            size=image_height_width)

    pred = tf.reshape(pred, [-1])
    gt = tf.reshape(annotation_batch_tensor, [-1])
    temp = tf.less_equal(gt, FLAGS.num_classes - 1)
    weights = tf.cast(temp, tf.int32)
    gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))
    acc, acc_update_op = tcm.streaming_accuracy(pred, gt, weights=weights)
    miou, miou_update_op = tcm.streaming_mean_iou(
        pred, gt, num_classes=FLAGS.num_classes, weights=weights)

    with tf.get_default_graph().as_default():
        variable_averages = tf.train.ExponentialMovingAverage(
            0.997, global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            sess.run([
                tf.local_variables_initializer(),
                tf.global_variables_initializer()
            ])

            # Load the model
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
            model_path = os.path.join(
                FLAGS.checkpoint_path,
                os.path.basename(ckpt_state.model_checkpoint_path))
            print('Restore from {}'.format(model_path))
            saver.restore(sess, model_path)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for i in range(100):  # 1449
                start = time.time()
                sess.run(
                    [image, annotation, pred, acc_update_op, miou_update_op])
                print('{}: cost {:.0f}ms'.format(i,
                                                 (time.time() - start) * 1000))
            acc_res = sess.run(acc)
            miou_res = sess.run(miou)
            print("Pascal VOC 2012 validation dataset pixel accuracy: " +
                  str(acc_res))
            print("Pascal VOC 2012 validation dataset Mean IoU: " +
                  str(miou_res))

            coord.request_stop()
            coord.join(threads)