Example #1
def build_heatmap(dataset, heat_map):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and coordinates from the dataset.
        images, cords = image_processing.inputs(dataset, BATCH_SIZE)

        # Number of classes in the Dataset label set (label 0 is reserved
        # for an unused background class).
        num_classes = dataset.num_classes()

        _, _, prob_ops = inception.inference(images, num_classes)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        # _eval_once(saver, summary_writer, accuracy, summary_op, confusion_matrix_op, logits, labels, dense_labels)
        heat_map = generate_heatmap(saver, dataset, summary_writer, prob_ops,
                                    cords, summary_op, heat_map)

        return heat_map
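
All of these eval/heatmap examples restore the exponential-moving-average (EMA) shadow weights rather than the raw trained variables, the standard Inception evaluation setup. A minimal, self-contained sketch of that restore pattern (TF 1.x; the variable name and decay value below are illustrative, not taken from the example):

import tensorflow as tf

# Toy variable standing in for the model weights (illustrative only).
weights = tf.get_variable('weights', shape=[10],
                          initializer=tf.zeros_initializer())

# Maintain shadow (moving-average) copies of all trainable variables.
ema = tf.train.ExponentialMovingAverage(0.9999)
maintain_op = ema.apply(tf.trainable_variables())

# variables_to_restore() maps each shadow name, e.g.
# 'weights/ExponentialMovingAverage', back to the live variable, so a
# Saver built from it loads the averaged weights into the model.
saver = tf.train.Saver(ema.variables_to_restore())
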
Example #2
def build_heatmap(dataset, heat_map, model_name, wsi_filename):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and coordinates from the dataset.
        images, cords = image_processing.inputs(dataset, BATCH_SIZE)

        # Number of classes in the Dataset label set (label 0 is reserved
        # for an unused background class).
        num_classes = dataset.num_classes()

        _, _, prob_ops = inception.inference(images, num_classes)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, graph_def=graph_def)

        # _eval_once(saver, summary_writer, accuracy, summary_op, confusion_matrix_op, logits, labels, dense_labels)
        heat_map = generate_heatmap(saver, dataset, model_name, prob_ops, cords, heat_map, wsi_filename)

        return heat_map
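
Passing graph_def to tf.summary.FileWriter records the graph proto so TensorBoard can render it next to the scalar summaries. In TF 1.x the graph_def argument is deprecated in favor of handing over the graph object directly, which is equivalent here:

summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                       graph=tf.get_default_graph())
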
Example #3
def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):
    """Calculate the total loss on a single tower running the ImageNet model.

    We perform 'batch splitting'. This means that we cut up a batch across
    multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
    then each tower will operate on a batch of 16 images.

    Args:
      images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
                                         FLAGS.image_size, 3].
      labels: 1-D integer Tensor of [batch_size].
      num_classes: number of classes
      scope: unique prefix string identifying the ImageNet tower, e.g.
        'tower_0'.
      reuse_variables: whether to reuse variables created by a previous tower
        (None for the first tower, True for the rest).

    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """
    # When fine-tuning a model, we do not restore the logits but instead we
    # randomly initialize the logits. The number of classes in the output of the
    # logit is the number of classes in specified Dataset.
    restore_logits = not FLAGS.fine_tune

    # Build inference Graph.
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        logits = inception.inference(images, num_classes, for_training=True,
                                     restore_logits=restore_logits,
                                     scope=scope)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    split_batch_size = images.get_shape().as_list()[0]
    inception.loss(logits, labels, batch_size=split_batch_size)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)

    # Calculate the total loss for the current tower.
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n(losses + regularization_losses, name='total_loss')

    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on TensorBoard.
        loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
        # Tag each raw loss with a '_raw' suffix and give the moving-average
        # version the original loss name (tf.summary.scalar sanitizes names
        # containing spaces or parentheses, so keep the suffix legal).
        tf.summary.scalar(loss_name + '_raw', l)
        tf.summary.scalar(loss_name, loss_averages.average(l))

    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
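
The re.sub call above strips the tower prefix so losses from every GPU tower land on the same TensorBoard chart. A standalone check of that pattern (TOWER_NAME is 'tower' in the upstream Inception code; treat that value as an assumption here):

import re

TOWER_NAME = 'tower'  # assumed value of inception.TOWER_NAME

for name in ['tower_0/total_loss', 'tower_12/cross_entropy', 'total_loss']:
    print(re.sub('%s_[0-9]*/' % TOWER_NAME, '', name))
# Prints: total_loss, cross_entropy, total_loss
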
Example #4
def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        images, labels = image_processing.inputs(dataset, BATCH_SIZE)

        # Number of classes in the Dataset label set (label 0 is reserved
        # for an unused background class).
        num_classes = dataset.num_classes()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, _, _ = inception.inference(images, num_classes)

        sparse_labels = tf.reshape(labels, [BATCH_SIZE, 1])
        indices = tf.reshape(tf.range(BATCH_SIZE), [BATCH_SIZE, 1])
        concated = tf.concat([indices, sparse_labels], axis=1)
        num_classes = logits[0].get_shape()[-1].value
        dense_labels = tf.sparse_to_dense(concated, [BATCH_SIZE, num_classes],
                                          1, 0)

        confusion_matrix_op = metrics.confusion_matrix(
            labels, tf.argmax(logits, axis=1))
        # false_positive_op = metrics.streaming_false_positives(logits, dense_labels)
        # false_negative_op = metrics.streaming_false_negatives(logits, dense_labels)

        # Calculate predictions.
        accuracy = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        while True:
            # _eval_once(saver, summary_writer, accuracy, summary_op, confusion_matrix_op, logits, labels, dense_labels)

            _eval_once(saver, summary_writer, accuracy, summary_op,
                       confusion_matrix_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
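
The indices/concat/sparse_to_dense sequence above predates tf.one_hot; in TF 1.x the same dense_labels matrix comes from a single call (assuming, as the example does, that labels holds class ids in [0, num_classes)):

# Equivalent one-liner: integer labels -> dense [BATCH_SIZE, num_classes] matrix.
dense_labels = tf.one_hot(labels, depth=num_classes, on_value=1, off_value=0)
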
Example #6
def build_heatmap(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and coordinates from the dataset.
        images, cords = image_processing.inputs(dataset, BATCH_SIZE)

        # Number of classes in the Dataset label set (label 0 is reserved
        # for an unused background class).
        num_classes = dataset.num_classes()

        assert BATCH_SIZE % FLAGS.num_threads == 0, 'BATCH_SIZE must be divisible by FLAGS.num_threads'

        # Build a Graph that computes the logits predictions from the
        # inference model.
        images_splits = tf.split(images, FLAGS.num_threads, axis=0)
        cords_splits = tf.split(cords, FLAGS.num_threads, axis=0)

        prob_ops = []
        cords_ops = []
        for i in range(FLAGS.num_threads):
            with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
                with slim.arg_scope([slim.variables.variable],
                                    device='/cpu:%d' % i):
                    print('i=%d' % i)
                    _, _, prob_op = inception.inference(images_splits[i],
                                                        num_classes,
                                                        scope=scope)
                    cords_op = tf.reshape(
                        cords_splits[i],
                        (int(BATCH_SIZE / FLAGS.num_threads), 1))
                    prob_ops.append(prob_op)
                    cords_ops.append(cords_op)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        generate_heatmap(saver, dataset, summary_writer, prob_ops, cords_ops,
                         summary_op)
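
The assert above guards the tf.split calls: given an integer count, tf.split requires the split axis to divide evenly. A minimal illustration of the batch splitting (the shapes below are illustrative, not from the example):

import tensorflow as tf

images = tf.zeros([32, 299, 299, 3])  # stands in for a batch of BATCH_SIZE images
splits = tf.split(images, 4, axis=0)  # stands in for FLAGS.num_threads = 4
print([s.get_shape().as_list() for s in splits])
# Prints four shapes of [8, 299, 299, 3]; tf.split raises if 32 % 4 != 0.
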
Example #8
def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        images, labels = image_processing.inputs(dataset, BATCH_SIZE)

        # Number of classes in the Dataset label set (label 0 is reserved
        # for an unused background class).
        num_classes = dataset.num_classes()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, _ = inception.inference(images, num_classes)

        print(logits.get_shape())
        print(labels.get_shape())

        # Calculate predictions.
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        top_5_op = tf.nn.in_top_k(logits, labels, 5)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        while True:
            _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
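
tf.nn.in_top_k returns a [BATCH_SIZE] boolean vector with True wherever the label sits among the k largest logits, so top-1/top-5 accuracy is the mean of that vector cast to float. A minimal reduction sketch (this is not the example's _eval_once, which is not shown):

# Batch accuracy from the boolean prediction vectors.
top_1_accuracy = tf.reduce_mean(tf.cast(top_1_op, tf.float32))
top_5_accuracy = tf.reduce_mean(tf.cast(top_5_op, tf.float32))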