Example no. 1
0
def evaluate(k=5):
    """Eval CIFAR-10 for a number of steps.

    Repeatedly restores the latest checkpoint (moving-average shadow
    variables) and measures top-k accuracy on the eval split until
    interrupted, or once if FLAGS.run_once is set.

    Args:
        k: number of highest-scoring logits a label may fall within to
           count as correct. Defaults to 5, preserving the original
           hard-coded top-5 behavior.
    """
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = resnet18.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = resnet18.inference(images)

        # Calculate predictions: a sample is a hit when its true label is
        # among the k largest logits.
        top_k_op = tf.nn.in_top_k(logits, labels, k)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            resnet18.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        # Evaluate in a loop so a long-running eval job tracks training;
        # FLAGS.run_once short-circuits to a single pass.
        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example no. 2
0
def _eval_weight_placeholders():
    """Create the 20 conv-weight placeholders fed to resnet18.inference.

    Returns:
        A list of float32 placeholders in the exact positional order that
        resnet18.inference and eval_once expect. Shapes are
        [filter_h, filter_w, in_channels, out_channels].
    """
    shapes = [
        [7, 7, 3, 64],      # W1: stem conv
        [3, 3, 64, 64],     # W2_1_b2a
        [3, 3, 64, 64],     # W2_1_b2b
        [3, 3, 64, 64],     # W2_2_b2a
        [3, 3, 64, 64],     # W2_2_b2b
        [1, 1, 64, 128],    # W3_1_b1: projection shortcut
        [3, 3, 64, 128],    # W3_1_b2a
        [3, 3, 128, 128],   # W3_1_b2b
        [3, 3, 128, 128],   # W3_2_b2a
        [3, 3, 128, 128],   # W3_2_b2b
        [1, 1, 128, 256],   # W4_1_b1: projection shortcut
        [3, 3, 128, 256],   # W4_1_b2a
        [3, 3, 256, 256],   # W4_1_b2b
        [3, 3, 256, 256],   # W4_2_b2a
        [3, 3, 256, 256],   # W4_2_b2b
        [1, 1, 256, 512],   # W5_1_b1: projection shortcut
        [3, 3, 256, 512],   # W5_1_b2a
        [3, 3, 512, 512],   # W5_1_b2b
        [3, 3, 512, 512],   # W5_2_b2a
        [3, 3, 512, 512],   # W5_2_b2b
    ]
    return [tf.placeholder(tf.float32, shape) for shape in shapes]


def evaluate(k=5):
    """Eval CIFAR-10 for a number of steps.

    Variant that feeds the network weights through placeholders rather
    than restoring moving-average shadow variables.

    Args:
        k: number of highest-scoring logits a label may fall within to
           count as correct (top-k accuracy). Defaults to 5.

    Returns:
        The precision value from the final eval_once call.
    """
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        images, labels = resnet18.inputs(eval_data=True)

        # Build a Graph that computes the logits predictions from the
        # inference model, with all conv weights supplied externally.
        weights = _eval_weight_placeholders()
        logits = resnet18.inference(images, *weights)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, k)

        # Restore raw variables directly; unlike the moving-average
        # variant, no shadow copies are involved here.
        saver = tf.train.Saver()

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        # Evaluate repeatedly (or once if FLAGS.run_once is set); the loop
        # body always runs at least once, so `precision` is always bound.
        while True:
            precision = eval_once(saver, summary_writer, top_k_op, summary_op,
                                  k, *weights)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
    return precision
Example no. 3
0
def tower_loss(scope, images, labels):
    """Compute the total loss for one tower running the CIFAR model.

    Args:
        scope: unique prefix string identifying the CIFAR tower,
            e.g. 'tower_0'.
        images: 4D tensor of shape [batch_size, height, width, 3].
        labels: 1D tensor of shape [batch_size].

    Returns:
        A scalar tensor containing the total loss for a batch of data.
    """
    # Forward pass in training mode.
    predictions = resnet18.inference(images, train=True)

    # Register the individual losses into the 'losses' collection; the
    # returned value itself is not used here.
    resnet18.loss(predictions, labels)

    # Collect only the losses belonging to this tower and sum them.
    tower_losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(tower_losses, name='total_loss')

    # Emit a scalar summary for each individual loss and the total.
    # Strip any 'tower_N/' prefix so multi-GPU runs display cleanly
    # on TensorBoard.
    tower_prefix = '%s_[0-9]*/' % resnet18.TOWER_NAME
    for loss_tensor in tower_losses + [total_loss]:
        clean_name = re.sub(tower_prefix, '', loss_tensor.op.name)
        tf.summary.scalar(clean_name, loss_tensor)

    return total_loss
Example no. 4
0
def tower_loss(scope, images, labels):
    """Calculate the total loss on a single tower running the CIFAR model.

    Args:
        scope: unique prefix string identifying the CIFAR tower,
            e.g. 'tower_0'.
        images: 4D tensor of shape [batch_size, height, width, 3].
        labels: 1D tensor of shape [batch_size].

    Returns:
        A scalar tensor containing the total loss for a batch of data.
    """
    # Build inference Graph. The 20 weight arguments are all None so the
    # model creates its own variables rather than using fed placeholders.
    logits = resnet18.inference(images, *([None] * 20), train=True)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = resnet18.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % resnet18.TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)

    return total_loss