def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        images, labels, all_filenames, filename_queue = image_processing.inputs(
            dataset)

        # Number of classes in the Dataset label set plus 1.
        # Label 0 is reserved for an (unused) background class
        num_classes = dataset.num_classes() + 1
        print("there are %d classes!" % dataset.num_classes())

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, _, end_points, net2048, sel_end_points = inception.inference(
            images, num_classes)

        # Calculate predictions. The 'predictions' endpoint holds the model's
        # output probabilities.
        max_percent = end_points['predictions']
        if FLAGS.mode == '0_softmax':
            top_1_op = tf.nn.in_top_k(logits, labels, 1)
            top_5_op = tf.nn.in_top_k(logits, labels, 5)
        elif FLAGS.mode == '1_sigmoid':
            top_1_op = None
            top_5_op = None
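            # in_top_k only applies to the softmax head; in sigmoid mode the
            # ops stay None, which _eval_once presumably handles itself.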
        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        while True:
            precision_at_1, current_score = _eval_once(
                saver, summary_writer, top_1_op, top_5_op, summary_op,
                max_percent, all_filenames, filename_queue, net2048,
                sel_end_points, logits, labels)
            print("%s: Precision: %.4f " % (datetime.now(), precision_at_1))
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
        return precision_at_1, current_score
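
# A minimal driver sketch for the evaluate() above, assuming the usual FLAGS
# and an ImagenetData-style dataset class (hypothetical wiring; the real
# main() lives elsewhere in this codebase):
#   def main(unused_argv=None):
#       dataset = ImagenetData(subset=FLAGS.subset)
#       assert dataset.data_files()
#       if tf.gfile.Exists(FLAGS.eval_dir):
#           tf.gfile.DeleteRecursively(FLAGS.eval_dir)
#       tf.gfile.MakeDirs(FLAGS.eval_dir)
#       evaluate(dataset)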
Example n. 3
def evaluate_op(dataset):
    # Get images and labels from the dataset.
    images, labels, _ = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set (the background class is
    # intentionally not added in this variant).
    #num_classes = dataset.num_classes() + 1
    num_classes = dataset.num_classes()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    return top_1_op, top_5_op
def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        images, labels = image_processing.inputs(dataset)

        # Number of classes in the Dataset label set plus 1.
        # Label 0 is reserved for an (unused) background class.
        num_classes = dataset.num_classes() + 1

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, _ = inception.inference(images, num_classes)
        # print(logits.get_shape())
        # print(labels.get_shape())

        # Calculate predictions.
        # top_1_op = tf.nn.in_top_k(logits, labels, 1)
        # top_5_op = tf.nn.in_top_k(logits, labels, 5)
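        # evaluate_multilabel is defined elsewhere in this file and presumably
        # returns a per-label correctness tensor for the multilabel setting.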
        label_bool_op = evaluate_multilabel(logits, labels)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        while True:
            _eval_once(saver, summary_writer, label_bool_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example n. 5
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels, filenames = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1
    #for i in range(FLAGS.num_gpus):
    #with tf.device('/gpu:%d' % (FLAGS.eval_gpu_id)):
    # Build a Graph that computes the logits predictions from the
    # inference model.
    endpoints = inception.inference_endpoint(images, num_classes)
    #positive_labels = 2
    # Calculate predictions.
    logits = endpoints['logits']
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    #top_5_op = tf.nn.in_top_k(logits, labels, 5)
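    # The 'predictions' endpoint holds the model's output probabilities; it is
    # passed to _eval_once together with the filenames, presumably to report
    # per-example scores.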
    positive_op = endpoints['predictions']


    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, positive_op, summary_op, filenames)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Example n. 6
def predict(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels, filenames = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set (no extra background class is
    # added in this variant).
    num_classes = dataset.num_classes()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
      inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    _predict_once(saver, filenames, logits)
Example n. 7
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set.
    num_classes = dataset.num_classes()

    # Number of examples in the Dataset.
    num_examples_dataset = dataset.num_examples_per_epoch()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
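    # tf.nn.in_top_k expects sparse integer class ids, so dense one-hot labels
    # are converted with argmax first.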
    if not FLAGS.sparse_labels:
      labels = tf.argmax(labels, axis=1)
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op, num_examples_dataset)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Example n. 8
def test(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, _, filenames = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    output = tf.nn.softmax(tf.slice(logits, [0,1], [-1,-1]), name='output')
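    # The slice above drops column 0 (the unused background class) so the
    # softmax is computed over the real classes only.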

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)


    results = _test(saver, filenames, output)

    current_time = datetime.now().strftime('%Y-%m-%d-%Hh%Mm%Ss')
    csvfilename = os.path.join(FLAGS.test_dir, 'submission-{}.csv'.format(current_time))
    zipfilename = '{}.zip'.format(csvfilename)
    # Note: under Python 3 the CSV file below should be opened in text mode
    # ('w', newline='') rather than 'wb'.

    with open(csvfilename, 'wb') as csvfile:
      writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
      writer.writerow(['img', 'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])
      for batch_result in results:
        for filename, result in batch_result:
          writer.writerow([filename] + result.tolist())

    with zipfile.ZipFile(zipfilename, 'w') as myzip:
      myzip.write(csvfilename)

    print('Submission available at: %s' % (zipfilename))
Example n. 9
def build_input(dataset, data_path, batch_size, standardize_images, mode):
    if dataset == 'mnist':
        from datasets import mnist
        return mnist.build_input(data_path, batch_size, standardize_images, mode)
    elif dataset == 'svhn':
        from datasets import svhn
        return svhn.build_input(data_path, batch_size, standardize_images, mode)
    elif dataset == 'cifar10':
        from datasets import cifar
        return cifar.build_input(dataset, data_path, batch_size, standardize_images, mode)
    elif dataset == 'cifar100':
        from datasets import cifar
        return cifar.build_input(dataset, data_path, batch_size, standardize_images, mode)
    elif dataset == 'imagenet':
        from inception import image_processing
        from inception.imagenet_data import ImagenetData
        images, labels = image_processing.inputs(ImagenetData('validation'),
                                                 batch_size=batch_size)
        import tensorflow as tf
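        # ImageNet labels in this pipeline span 1001 classes: 1000 object
        # classes plus the reserved background class at index 0.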
        labels = tf.one_hot(labels, 1001)
        return images, labels
    else:
        raise ValueError("Dataset {} not supported".format(dataset))
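
# A minimal usage sketch, assuming a local CIFAR-10 data directory (the path
# below is hypothetical):
#   images, labels = build_input('cifar10', '/tmp/cifar10_data', batch_size=128,
#                                standardize_images=True, mode='eval')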
Example n. 10
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    # top_1_op = tf.nn.in_top_k(logits, labels, 1)
    # top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels, all_filenames, filename_queue = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _, end_points, net2048, sel_end_points = inception.inference(images, num_classes)

    # Calculate predictions. The 'predictions' endpoint holds the model's
    # output probabilities; the top-k ops are intentionally not used here.
    max_percent = end_points['predictions']

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, graph_def=graph_def)

    # The tower-loss computation used during training is not needed for
    # evaluation, so no loss op is built; _eval_once receives False instead.
    loss = False

    while True:
      precision_at_1, current_score = _eval_once(saver, summary_writer, summary_op, max_percent, all_filenames, filename_queue, net2048, sel_end_points, logits, labels, loss)
      print("%s: Precision: %.4f --------------------" % (datetime.now(), precision_at_1) )
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
    return precision_at_1, current_score
def retrieve(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default(), tf.Session() as sess:
    # Get images and labels from the dataset.
    images, labels, filenames_tensor = image_processing.inputs(dataset, return_filenames=True)


    # Build a Graph that computes the features.
    num_classes = dataset.num_classes() + 1
    _, _ = inception.inference(images, num_classes, restore_logits=False)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Restore checkpoint.
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      if os.path.isabs(ckpt.model_checkpoint_path):
        # Restores from checkpoint with absolute path.
        saver.restore(sess, ckpt.model_checkpoint_path)
      else:
        # Restores from checkpoint with relative path.
        saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
                                         ckpt.model_checkpoint_path))

      # Assuming model_checkpoint_path looks something like:
      #   /my-favorite-path/imagenet_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('Successfully loaded model from %s at step=%s.' %
            (ckpt.model_checkpoint_path, global_step))
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))

      print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
      start_time = time.time()
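      # Look up the feature tensor by its graph name; which layer is extracted
      # (e.g. a pre-logits pooling output) is controlled by
      # FLAGS.features_tensor_name.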
      features_tensor = tf.get_default_graph().get_tensor_by_name(FLAGS.features_tensor_name)
      features = []
      filenames = []
      step = 0
      while step < num_iter and not coord.should_stop():
        features_batch, filenames_batch = sess.run([features_tensor, filenames_tensor])
        features.append(features_batch)
        filenames.extend(filenames_batch)

        step += 1
        if step % 20 == 0:
          duration = time.time() - start_time
          sec_per_batch = duration / 20.0
          examples_per_sec = FLAGS.batch_size / sec_per_batch
          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
                'sec/batch)' % (datetime.now(), step, num_iter,
                                examples_per_sec, sec_per_batch))
          start_time = time.time()
      # `features` is a list of per-batch arrays; the per-example limit is
      # applied after stacking, in the return statement below.
      filenames = filenames[:FLAGS.num_examples]

    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)

    return np.vstack(features)[:FLAGS.num_examples], filenames
def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels from the dataset.
        images, labels = image_processing.inputs(dataset)

        # Number of classes in the Dataset label set plus 1.
        # Label 0 is reserved for an (unused) background class.
        num_classes = dataset.num_classes() + 1

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits, _ = inception.inference(images, num_classes)
        pred = tf.nn.softmax(logits)
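        # Softmax probabilities; evaluated below to compute accuracy and the
        # confusion matrix.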

        top_1_op = tf.nn.in_top_k(logits, labels, 1)

        # Calculate predictions.
        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                if os.path.isabs(ckpt.model_checkpoint_path):
                    # Restores from checkpoint with absolute path.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    # Restores from checkpoint with relative path.
                    saver.restore(
                        sess,
                        os.path.join(FLAGS.checkpoint_dir,
                                     ckpt.model_checkpoint_path))

                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/imagenet_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print('Successfully loaded model from %s at step=%s.' %
                      (ckpt.model_checkpoint_path, global_step))
            else:
                print('No checkpoint file found')
                return

            # Start the queue runners.
            coord = tf.train.Coordinator()
            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(
                        qr.create_threads(sess,
                                          coord=coord,
                                          daemon=True,
                                          start=True))
                num_iter = int(math.ceil(FLAGS.num_examples /
                                         FLAGS.batch_size))
                # Counts the number of correct predictions.
                test_acc = 0.0
                count_top_1 = 0
                confusion_m_all = []
                total_sample_count = num_iter * FLAGS.batch_size
                step = 0

                print('%s: starting evaluation on (%s).' %
                      (datetime.now(), FLAGS.subset))
                start_time = time.time()
                while step < num_iter and not coord.should_stop():
                    # Evaluate into new names; reassigning `pred`/`labels`
                    # would overwrite the tensors and break the next sess.run.
                    pred_val, labels_val, top_1 = sess.run(
                        [pred, labels, top_1_op])
                    correct_pred = np.equal(np.argmax(pred_val, 1), labels_val)
                    test_acc += np.sum(correct_pred.astype(float))

                    confu_m = confusion_matrix(labels_val,
                                               np.argmax(pred_val, 1))
                    confusion_m_all.append(confu_m)
                    count_top_1 += np.sum(top_1)
                    step += 1

                # Compute precision @ 1.
                precision_at_1 = count_top_1 / float(total_sample_count)
                print('%s: precision @ 1 = %.4f  [%d examples]' %
                      (datetime.now(), precision_at_1, total_sample_count))

                confusion_m_average = np.sum(confusion_m_all, axis=0)
                print(confusion_m_average)

                test_acc = float(test_acc) / float(total_sample_count)
                print("Test Accuracy: {} \n".format(test_acc))

                summary = tf.Summary()
                summary.ParseFromString(sess.run(summary_op))
                summary.value.add(tag='Precision @ 1',
                                  simple_value=precision_at_1)
                #summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
                summary_writer.add_summary(summary, global_step)

            except Exception as e:  # pylint: disable=broad-except
                coord.request_stop(e)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)