Example 1
def evaluate(train_dir):
  """Loads the model and runs evaluation
  """

  target_dir = os.path.join(train_dir, "model_files")  
  params = imp.load_source("params", os.path.join(target_dir, "params.py"))
  data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
  network = imp.load_source("network", os.path.join(target_dir, "network.py"))
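  # These modules are presumably code snapshots written into
  # <train_dir>/model_files at training time, so evaluation runs against the
  # exact model definition that produced the checkpoint.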

  with tf.Graph().as_default():
    
    # Retrieve images and labels
    eval_data = FLAGS.eval_data == 'test'
    images, labels = data_input.inputs(eval_data=eval_data, data_dir=utils.cfg.data_dir,
                                       batch_size=params.batch_size)

    # Generate placeholders for the images and labels.
    keep_prob = utils.placeholder_inputs(params.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = network.inference(images, keep_prob)

    # Add to the Graph the Ops for loss calculation.
    loss = network.loss(logits, labels)
  
    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = network.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("No checkpoints found! ")
      exit(1)

    print("Doing Evaluation with lots of data")  
    utils.do_eval(sess=sess,
                  eval_correct=eval_correct,
                  keep_prob=keep_prob, 
                  num_examples=params.num_examples_per_epoch_for_eval,
                  params=params,
                  name="eval")
Example 2
def evaluate_last():
    """Loads the model and runs evaluation."""

    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        model_dir = os.path.join(FLAGS.model_dir, FLAGS.name)

        eval_data = FLAGS.eval_data == "test"
        images, labels = data_input.inputs(eval_data=eval_data, data_dir=FLAGS.data_dir, batch_size=FLAGS.batch_size)

        # images, labels =  data_input.distorted_inputs(eval_data=eval_data, data_dir=FLAGS.data_dir,
        #                                       batch_size=FLAGS.batch_size)

        # Generate placeholders for the images and labels.
        keep_prob = utils.placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = model.inference(images, keep_prob)

        # Add to the Graph the Ops for loss calculation.
        loss = model.loss(logits, labels)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = model.evaluation(logits, labels)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     cifar10.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        # Build the summary operation based on the TF collection of Summaries.
        # summary_op = tf.merge_all_summaries()

        # graph_def = tf.get_default_graph().as_graph_def()
        # summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
        # 	                                     graph_def=graph_def)

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        print(model_dir)

        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("No checkpoints found! ")
            exit(1)

        print("Doing Evaluation with lots of data")
        utils.do_eval(
            sess=sess,
            eval_correct=eval_correct,
            keep_prob=keep_prob,
            num_examples=data_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL,
        )
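
Unlike Example 1, this variant reads its configuration from tf.app.flags rather than a saved params module. A hedged sketch of flag definitions that would satisfy the references above (the default values are illustrative, not taken from the source):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_dir', 'models', 'Root directory containing trained models.')
tf.app.flags.DEFINE_string('name', 'default', 'Name of the training run to evaluate.')
tf.app.flags.DEFINE_string('data_dir', 'data', 'Directory holding the evaluation data.')
tf.app.flags.DEFINE_string('eval_data', 'test', "Set to 'test' to evaluate on the test split.")
tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of examples per batch.')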
Example 3
def run_training():
    """Train BinaryConnect."""
    # Get the sets of images and labels for training, validation, and
    # test on CIFAR10.
    data_sets = cifar10.read_data_sets(dst_dir='./dataset',
                                       validation_size=5000)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, train_placeholder = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = bc.inference_bin(images_placeholder, train_placeholder,
                                  stochastic=FLAGS.stochastic,
                                  use_bnorm=True) \
            if FLAGS.binary \
            else bc.inference_ref(images_placeholder, train_placeholder,
                                  use_bnorm=True)

        # Add to the Graph the Ops for loss calculation.
        loss = bc.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = bc.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_metric = bc.evaluation(logits, labels_placeholder)

        # Add a placeholder for logging execution time
        # frequency_placeholder = tf.placeholder(tf.float32, shape=())
        # tf.summary.scalar('Execution Time', frequency_placeholder)
        # TODO: support a separate summary for metadata (e.g. execution time)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        # Add the variable initializer Op.
        ivars = tf.global_variables() + tf.local_variables()
        init = tf.variables_initializer(ivars)

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a logger to the validation accuracy
        val_acc_pl = tf.placeholder(tf.float32, shape=())
        summary_val_acc = tf.summary.scalar(name='validation_acc',
                                            tensor=val_acc_pl,
                                            collections=['validation'])
        summary_val = tf.summary.merge([summary_val_acc])

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.log_dir, 'train'), sess.graph)
        summary_writer_val = tf.summary.FileWriter(
            os.path.join(FLAGS.log_dir, 'val'), sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        duration = 0
        tp_value_total = 0
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder, train_placeholder,
                                       True)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value, acc_val = sess.run([train_op, loss, eval_metric],
                                              feed_dict=feed_dict)

            duration += time.time() - start_time
            tp_value_total += acc_val

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0 and step > 0:
                # Print status to stdout.
                images_freq = 100 * FLAGS.batch_size / duration
                print(
                    'Step %d: loss = %.2f, correct = %.2f%% (%.3f images/sec)'
                    % (step, loss_value, tp_value_total / FLAGS.batch_size,
                       images_freq))
                tp_value_total = 0
                duration = 0
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer_train.add_summary(summary_str, step)
                summary_writer_train.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 500 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                # print('Training Data Eval:')
                # do_eval(sess,
                #         eval_metric,
                #         images_placeholder,
                #         labels_placeholder,
                #         train_placeholder,
                #         data_sets.train, summary)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                accuracy_val = do_eval(sess, eval_metric, images_placeholder,
                                       labels_placeholder, train_placeholder,
                                       data_sets.validation)
                # TODO: find a way to collect summaries for validation
                summary_str = sess.run(summary_val,
                                       feed_dict={val_acc_pl: accuracy_val})
                summary_writer_val.add_summary(summary_str, step)
                summary_writer_val.flush()

                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_metric, images_placeholder,
                        labels_placeholder, train_placeholder, data_sets.test)
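
placeholder_inputs and fill_feed_dict are assumed helpers in this example. A minimal sketch consistent with how they are called, assuming CIFAR-10 inputs of shape 32x32x3 and a data_sets object with the usual next_batch() method (all of this is inferred, not the project's actual code):

def placeholder_inputs(batch_size):
    images_pl = tf.placeholder(tf.float32, shape=(batch_size, 32, 32, 3))
    labels_pl = tf.placeholder(tf.int32, shape=(batch_size,))
    # Scalar flag distinguishing training from inference, e.g. for batch norm.
    train_pl = tf.placeholder(tf.bool, shape=())
    return images_pl, labels_pl, train_pl

def fill_feed_dict(data_set, images_pl, labels_pl, train_pl, is_training):
    images, labels = data_set.next_batch(FLAGS.batch_size)
    return {images_pl: images, labels_pl: labels, train_pl: is_training}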
Example 4
def evaluate_last():
    """Loads the model and runs evaluation
  """

    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        model_dir = os.path.join(FLAGS.model_dir, FLAGS.name)

        eval_data = FLAGS.eval_data == 'test'
        images, labels = data_input.inputs(eval_data=eval_data,
                                           data_dir=FLAGS.data_dir,
                                           batch_size=FLAGS.batch_size)

        # images, labels = data_input.distorted_inputs(eval_data=eval_data, data_dir=FLAGS.data_dir,
        #                                              batch_size=FLAGS.batch_size)

        # Generate placeholders for the images and labels.
        keep_prob = utils.placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = model.inference(images, keep_prob)

        # Add to the Graph the Ops for loss calculation.
        loss = model.loss(logits, labels)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = model.evaluation(logits, labels)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     cifar10.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        # Build the summary operation based on the TF collection of Summaries.
        # summary_op = tf.merge_all_summaries()

        # graph_def = tf.get_default_graph().as_graph_def()
        # summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
        #                                         graph_def=graph_def)

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        print(model_dir)

        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("No checkpoints found! ")
            exit(1)

        print("Doing Evaluation with lots of data")
        utils.do_eval(sess=sess,
                      eval_correct=eval_correct,
                      keep_prob=keep_prob,
                      num_examples=data_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL)
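
The commented-out block above refers to the CIFAR-10 tutorial's trick of evaluating the exponential-moving-average shadow weights instead of the raw ones. If the training code maintained such averages, the saver would be rebuilt like this (0.9999 is the tutorial's MOVING_AVERAGE_DECAY; the decay must match whatever training used):

variable_averages = tf.train.ExponentialMovingAverage(0.9999)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)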
Example 5
def run_training():
  """Train model for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.

  # Tell TensorFlow that the model will be built into the default Graph.
  train_dir = os.path.join(FLAGS.model_dir, FLAGS.name)

  with tf.Graph().as_default():

    global_step = tf.Variable(0, trainable=False)

    with tf.name_scope('Input'):
      image_batch, label_batch = data_input.distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
      # Generate placeholders for the images and labels.
      keep_prob = utils.placeholder_inputs(FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = model.inference(image_batch, keep_prob)

    # Add to the Graph the Ops for loss calculation.
    loss = model.loss(logits, label_batch)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = model.training(loss, global_step=global_step, learning_rate=FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = model.evaluation(logits, label_batch)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(train_dir,
                                            graph_def=sess.graph_def)

    # And then after everything is built, start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = utils.fill_feed_dict(keep_prob, train=True)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        duration = time.time() - start_time
        examples_per_sec = FLAGS.batch_size / duration
        sec_per_batch = float(duration)
        print('Step %d: loss = %.2f (%.3f sec/batch; %.1f examples/sec)' %
              (step, loss_value, sec_per_batch, examples_per_sec))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
        # Evaluate against the training set.

      if (step + 1) % 10000 == 0 or (step + 1) == FLAGS.max_steps:  
        print('Training Data Eval:')
        utils.do_eval(sess,
                      eval_correct,
                      keep_prob,
                      data_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
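
A script built around run_training() typically ends with the standard TF 1.x entry point; this boilerplate is conventional and not taken from the source:

def main(_):
  run_training()

if __name__ == '__main__':
  tf.app.run()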
Example 6
def evaluate(train_dir):
    """Loads the model and runs evaluation
  """

    target_dir = os.path.join(train_dir, "model_files")
    params = imp.load_source("params", os.path.join(target_dir, "params.py"))
    data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
    network = imp.load_source("network", os.path.join(target_dir,
                                                      "network.py"))

    with tf.Graph().as_default():

        # Retrieve images and labels
        eval_data = FLAGS.eval_data == 'test'
        images, labels = data_input.inputs(eval_data=eval_data,
                                           data_dir=utils.cfg.data_dir,
                                           batch_size=params.batch_size)

        # Generate placeholders for the images and labels.
        keep_prob = utils.placeholder_inputs(params.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = network.inference(images, keep_prob)

        # Add to the Graph the Ops for loss calculation.
        loss = network.loss(logits, labels)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = network.evaluation(logits, labels)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        ckpt = tf.train.get_checkpoint_state(train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("No checkpoints found! ")
            exit(1)

        print("Doing Evaluation with lots of data")
        utils.do_eval(sess=sess,
                      eval_correct=eval_correct,
                      keep_prob=keep_prob,
                      num_examples=params.num_examples_per_epoch_for_eval,
                      params=params,
                      name="eval")