Example #1
def main(args):
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.items():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=list(metrics_to_updates.values()),
        eval_interval_secs=FLAGS.eval_interval_secs)
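These snippets share module-level setup that is not shown: TF-Slim, the mnist, lenet and nets_factory modules from the tensorflow/models slim directory, and a load_batch helper. Below is a minimal sketch of that setup; the import paths and the preprocessing inside load_batch are assumptions, not the original code.

import math

import tensorflow as tf
from tensorflow.contrib import metrics

# datasets/ and nets/ are assumed to come from tensorflow/models research/slim
from datasets import mnist
from nets import lenet
from nets import nets_factory

slim = tf.contrib.slim


def load_batch(dataset, batch_size, height=28, width=28, is_training=False):
    # Read single examples from the slim Dataset via a queue-based provider.
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=is_training,
        common_queue_capacity=2 * batch_size,
        common_queue_min=batch_size)
    image, label = provider.get(['image', 'label'])

    # Convert to float and keep the 28x28x1 shape that LeNet expects.
    image = tf.to_float(image)
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)

    # Assemble batches with a queue runner.
    images, labels = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, labels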
Example #2
def main(args):
    dataset = mnist.get_split('train', '/tmp/mnist')

    images, labels = load_batch(dataset, BATCHSIZE, is_training=True)

    # run the images through the LeNet model
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, is_training=True)

    # get the cross-entropy loss
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)

    tf.losses.softmax_cross_entropy(one_hot_labels, logits)

    # scale the loss up; the summary and transform_grads_fn scale it back down
    total_loss = tf.losses.get_total_loss() * LOSS_SCALING_FACTOR

    tf.summary.scalar('loss', total_loss / LOSS_SCALING_FACTOR)

    optimiser = tf.train.GradientDescentOptimizer(LEARNING_RATE)

    # create the train op; gradients are rescaled by scale_down_grads
    train_op = tf.contrib.training.create_train_op(
        total_loss,
        optimiser,
        summarize_gradients=True,
        transform_grads_fn=scale_down_grads)

    # print the trainable variables in the graph
    for i in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        print(i)

    slim.learning.train(
        train_op,
        './log/train_3',
        save_summaries_secs=2,
        #session_wrapper=tf_debug.LocalCLIDebugWrapperSession
    )
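Example #2 refers to a scale_down_grads function that is not shown. tf.contrib.training.create_train_op calls transform_grads_fn with a list of (gradient, variable) pairs and expects the updated list back, so a plausible sketch that matches the loss scaling above (an assumption, not the original helper) is:

def scale_down_grads(grads_and_vars):
    # Undo the loss scaling by dividing each gradient by LOSS_SCALING_FACTOR.
    return [(grad / LOSS_SCALING_FACTOR if grad is not None else None, var)
            for grad, var in grads_and_vars]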
Example #3
def main(args):
    # load the dataset
    dataset = mnist.get_split('train', FLAGS.data_dir)

    # load batch of dataset
    images, labels = load_batch(dataset, FLAGS.batch_size, is_training=True)

    network_fn = nets_factory.get_network_fn("lenet",
                                             num_classes=10,
                                             is_training=True)
    # run the image through the model
    #    predictions,_ = lenet.lenet(images)
    predictions, _ = network_fn(images)
    #    slim.model_analyzer.analyze_ops(tf.get_default_graph(), print_info=True)
    variables = slim.get_model_variables()
    for var in variables:
        tf.summary.histogram(var.op.name, var)
    slim.model_analyzer.analyze_vars(variables, print_info=True)
    # get the cross-entropy loss
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    tf.losses.softmax_cross_entropy(one_hot_labels, predictions)
    total_loss = tf.losses.get_total_loss()
    tf.summary.scalar('loss', total_loss)

    # use RMSProp to optimize
    optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)

    # create train op
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    # run training
    slim.learning.train(train_op,
                        FLAGS.log_dir,
                        save_summaries_secs=20,
                        save_interval_secs=60 * 2)
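The examples that read FLAGS assume command-line flags defined elsewhere in the module. A minimal sketch of those definitions and the entry point, using the usual tf.app.flags pattern (the defaults here are placeholders):

flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/mnist', 'Directory holding the MNIST data.')
flags.DEFINE_string('log_dir', '/tmp/mnist/log', 'Directory for checkpoints and summaries.')
flags.DEFINE_string('checkpoint_dir', '/tmp/mnist/log', 'Directory to read checkpoints from.')
flags.DEFINE_integer('batch_size', 32, 'Number of images per batch.')
flags.DEFINE_integer('num_evals', 100, 'Number of batches to evaluate per run.')
flags.DEFINE_integer('eval_interval_secs', 60, 'Seconds between evaluation runs.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags and calls main(argv)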
Example #4
def main(args):
    tf.logging.set_verbosity(tf.logging.DEBUG)
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)
    print(images, labels)
    # get the model prediction
    predictions,_ = lenet.lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
#        'Recall_3': slim.metrics.streaming_recall_at_k(predictions, labels, 3),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.items():
        summary_name = 'eval/%s' % metric_name
        tf.summary.scalar(summary_name, metric_value)
        
#    for name, value in metrics_to_values.items():
#        summary_name = 'eval/%s' % name
#        op = tf.summary.scalar(summary_name, value, collections=[])
#        op = tf.Print(op, [value], summary_name)
#        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
#    slim.evaluation.evaluation_loop(
#        '',
#        FLAGS.checkpoint_dir,
#        FLAGS.log_dir,
#        num_evals=FLAGS.num_evals,
#        eval_op=list(metrics_to_updates.values()),
#        eval_interval_secs=FLAGS.eval_interval_secs)

    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    num_batches = math.ceil(10000 / float(FLAGS.batch_size))
    metric_values = slim.evaluation.evaluate_once(
        master='',
        checkpoint_path=checkpoint_path,
        logdir=FLAGS.log_dir,
        num_evals=num_batches,
        eval_op=list(metrics_to_updates.values()),
        final_op=list(metrics_to_values.values()))
    for metric, value in zip(metrics_to_values.keys(), metric_values):
        print("%s: %f" % (metric, value))
Example #5
def main(args):
    # load the dataset
    dataset = mnist.get_split('train', FLAGS.data_dir)

    # load batch of dataset
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=True)

    # run the image through the model
    predictions = lenet(images)

    # get the cross-entropy loss
    one_hot_labels = slim.one_hot_encoding(
        labels,
        dataset.num_classes)
    slim.losses.softmax_cross_entropy(
        predictions,
        one_hot_labels)
    total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('loss', total_loss)

    # use RMSProp to optimize
    optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)

    # create train op
    train_op = slim.learning.create_train_op(
        total_loss,
        optimizer,
        summarize_gradients=True)

    # run training
    slim.learning.train(
        train_op,
        FLAGS.log_dir,
        save_summaries_secs=20)
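As written, this final example trains until it is interrupted; slim.learning.train also takes a number_of_steps argument when a fixed number of training steps is wanted.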