Example #1
File: eval.py  Project: WeixiZhu94/MNIST
import tensorflow as tf
import tensorflow.contrib.slim as slim

# `inputs` and `network` are project-local helpers (see the sketch below).
def main(train_dir, batch_size, num_batches, log_dir, checkpoint_dir=None):
    if checkpoint_dir is None:
        checkpoint_dir = log_dir

    images, labels = inputs(train_dir, False, batch_size, num_batches)
    predictions, total_loss = network(images, labels)

    tf.summary.scalar('loss', total_loss)
    # Convert logits to predicted class ids (cast to int32 for the accuracy ops).
    predictions = tf.to_int32(tf.argmax(predictions, 1))

    tf.summary.scalar('accuracy', slim.metrics.accuracy(predictions, labels))

    # These are streaming metrics, which compute a "running" value,
    # e.g. the running accuracy over all batches evaluated so far.
    metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
        'accuracy':
        slim.metrics.streaming_accuracy(predictions, labels),
    })

    # Define the streaming summaries to write:
    for metric_name, metric_value in metrics_to_values.items():
        tf.summary.scalar(metric_name, metric_value)

    # Evaluate every 30 seconds; the empty first argument is `master`,
    # which means the evaluation runs in a local in-process session.
    slim.evaluation.evaluation_loop('',
                                    checkpoint_dir,
                                    log_dir,
                                    num_evals=1,
                                    eval_op=list(metrics_to_updates.values()),
                                    summary_op=tf.summary.merge_all(),
                                    eval_interval_secs=30,
                                    max_number_of_evaluations=100000000)
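
The examples on this page call two project-local helpers, `inputs` and `network`, whose definitions are not shown here. A rough sketch of the interface they would need to satisfy follows; the reader pipeline, feature names, layer sizes, and the use of `num_batches` as an epoch limit are illustrative assumptions, not the project's actual code:

import os
import tensorflow as tf
import tensorflow.contrib.slim as slim

def inputs(data_dir, is_training, batch_size, num_batches):
    # Hypothetical sketch: read (image, label) pairs from TFRecord files
    # under `data_dir` and batch them. Feature names are assumptions.
    filenames = tf.gfile.Glob(os.path.join(data_dir, '*.tfrecords'))
    queue = tf.train.string_input_producer(filenames, num_epochs=num_batches)
    _, serialized = tf.TFRecordReader().read(queue)
    features = tf.parse_single_example(serialized, {
        'image': tf.FixedLenFeature([784], tf.float32),
        'label': tf.FixedLenFeature([], tf.int64),
    })
    return tf.train.shuffle_batch([features['image'], features['label']],
                                  batch_size=batch_size,
                                  capacity=10 * batch_size,
                                  min_after_dequeue=2 * batch_size)

def network(images, labels):
    # Hypothetical sketch: a small classifier returning (logits, total_loss),
    # the pair each `main` on this page unpacks.
    net = slim.flatten(images)
    net = slim.fully_connected(net, 256, scope='fc1')
    logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')
    tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    return logits, tf.losses.get_total_loss()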
Example #2
import tensorflow as tf
import tensorflow.contrib.slim as slim

def main(train_dir, batch_size, num_batches, log_dir):
    images, labels = inputs(train_dir,
                            True,
                            batch_size,
                            num_batches)
    predictions, total_loss = network(images, labels)

    tf.summary.scalar('loss', total_loss)

    # Plain gradient descent with a fixed learning rate.
    optimizer = tf.train.GradientDescentOptimizer(0.001)
    train_op = slim.learning.create_train_op(total_loss, optimizer, summarize_gradients=True)

    slim.learning.train(train_op, log_dir, save_summaries_secs=10, save_interval_secs=10)
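
For reference, a run of this trainer could be launched like so; every value below is a placeholder, not taken from the project:

if __name__ == '__main__':
    main(train_dir='data/mnist',  # placeholder path
         batch_size=128,
         num_batches=10000,
         log_dir='log/train')     # placeholder path

Summaries and checkpoints are written to `log_dir` every 10 seconds, so training can be followed with `tensorboard --logdir log/train`.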
Example #3
File: train.py  Project: WeixiZhu94/MNIST
import tensorflow as tf
import tensorflow.contrib.slim as slim

def main(train_dir, batch_size, num_batches, log_dir):
    images, labels = inputs(train_dir, True, batch_size, num_batches)
    predictions, total_loss = network(images, labels)

    tf.summary.scalar('loss', total_loss)

    # SGD with Nesterov momentum instead of plain gradient descent.
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.025,
                                           momentum=0.9,
                                           use_nesterov=True)
    train_op = slim.learning.create_train_op(total_loss,
                                             optimizer,
                                             summarize_gradients=True)

    slim.learning.train(train_op,
                        log_dir,
                        save_summaries_secs=20,
                        save_interval_secs=20)
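
`slim.learning.create_train_op` accepts any `tf.train.Optimizer`, so the fixed Nesterov learning rate above could also be swapped for a decaying schedule. A minimal sketch, with illustrative decay values:

global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(0.025, global_step,
                                           decay_steps=1000,  # illustrative
                                           decay_rate=0.95)   # illustrative
optimizer = tf.train.MomentumOptimizer(learning_rate,
                                       momentum=0.9,
                                       use_nesterov=True)

`create_train_op` increments the same global step on every update, so the decay takes effect automatically during `slim.learning.train`.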