def testWithEpochLimit(self):
    predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
    labels_limited = input.limit_epochs(self._labels, num_epochs=1)

    value_op, update_op = metric_ops.streaming_accuracy(
        predictions_limited, labels_limited)

    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create checkpoint and log directories:
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to a checkpoint directory:
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1, num_evals=10000)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
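The termination mechanism here is worth spelling out: limit_epochs attaches a local epoch counter to the tensor and raises OutOfRangeError once num_epochs passes have been served, which is what stops the evaluation loop long before num_evals=10000 is reached. A minimal, standalone sketch of that behavior, assuming TF 1.x graph mode (the test above uses internal module aliases; this sketch uses the public tf.train.limit_epochs):

import tensorflow as tf

limited = tf.train.limit_epochs(tf.constant([1.0, 2.0]), num_epochs=1)
with tf.Session() as sess:
  # limit_epochs keeps its epoch counter in a local variable.
  sess.run(tf.local_variables_initializer())
  sess.run(limited)  # first and only allowed pass
  try:
    sess.run(limited)
  except tf.errors.OutOfRangeError:
    print('epoch limit reached')  # expected on the second pass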
Example n. 2
    def _fn():
      x = constant_op.constant(points)
      if batch_size == num_points:
        return input_lib.limit_epochs(x, num_epochs=num_epochs), None
      if randomize:
        indices = random_ops.random_uniform(
            constant_op.constant([batch_size]),
            minval=0,
            maxval=num_points - 1,
            dtype=dtypes.int32,
            seed=10)
      else:
        # We need to cycle through the indices sequentially. We create a queue
        # to maintain the list of indices.
        q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())

        # Conditionally initialize the Queue.
        def _init_q():
          with ops.control_dependencies(
              [q.enqueue_many(math_ops.range(num_points))]):
            return control_flow_ops.no_op()

        init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
                                       control_flow_ops.no_op)
        with ops.control_dependencies([init_q]):
          offsets = q.dequeue_many(batch_size)
          with ops.control_dependencies([q.enqueue_many(offsets)]):
            indices = array_ops.identity(offsets)
      batch = array_ops.gather(x, indices)
      return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
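The dequeue-then-re-enqueue trick above is what cycles through the indices sequentially: each batch of offsets is pushed back onto the queue before it is returned, so the queue always holds a rotated copy of range(num_points). A minimal sketch of the same pattern in isolation, assuming TF 1.x (the queue size, batch size, and printed rotation are illustrative):

import tensorflow as tf

num_points, batch_size = 5, 2
q = tf.FIFOQueue(num_points, tf.int32, ())

def _init_q():
  # Fill the queue with 0..num_points-1 the first time it is empty.
  with tf.control_dependencies([q.enqueue_many(tf.range(num_points))]):
    return tf.no_op()

init_q = tf.cond(q.size() <= 0, _init_q, tf.no_op)
with tf.control_dependencies([init_q]):
  offsets = q.dequeue_many(batch_size)
  # Re-enqueue what we just took so the rotation continues next step.
  with tf.control_dependencies([q.enqueue_many(offsets)]):
    indices = tf.identity(offsets)

with tf.Session() as sess:
  print([sess.run(indices).tolist() for _ in range(4)])
  # -> [[0, 1], [2, 3], [4, 0], [1, 2]]: sequential, wrapping around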
Example n. 3
 def _fn():
   x = constant_op.constant(points)
   if batch_size == num_points:
     return input_lib.limit_epochs(x, num_epochs=num_epochs), None
   indices = random_ops.random_uniform(
       constant_op.constant([batch_size]),
       minval=0,
       maxval=num_points - 1,
       dtype=dtypes.int32,
       seed=10)
   batch = array_ops.gather(x, indices)
   return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
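One subtlety shared by this sampler and the randomized branch of Example n. 2: for integer dtypes, random_uniform draws from the half-open range [minval, maxval), so maxval=num_points - 1 can never select the last point. A tiny sketch of that boundary behavior, assuming TF 1.x (the range of 3 is illustrative):

import tensorflow as tf

draws = tf.random_uniform([1000], minval=0, maxval=3, dtype=tf.int32)
with tf.Session() as sess:
  print(set(sess.run(draws)))  # {0, 1, 2} -- maxval itself never appears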
Example n. 4
 def _input_fn(num_epochs=None):
   features = {
       'age':
           input_lib.limit_epochs(
               constant_op.constant([[0.8], [0.15], [0.]]),
               num_epochs=num_epochs),
       'language':
           sparse_tensor.SparseTensor(
               values=input_lib.limit_epochs(
                   ['en', 'fr', 'zh'], num_epochs=num_epochs),
               indices=[[0, 0], [0, 1], [2, 0]],
               dense_shape=[3, 2])
   }
   return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
Example n. 5
 def _input_fn(num_epochs=None):
   features = {
       'age':
           input_lib.limit_epochs(
               constant_op.constant(((50,), (20,), (10,))),
               num_epochs=num_epochs),
       'language':
           sparse_tensor.SparseTensor(
               values=input_lib.limit_epochs(
                   ('en', 'fr', 'zh'), num_epochs=num_epochs),
               indices=((0, 0), (0, 1), (2, 0)),
               dense_shape=(3, 2))
   }
   return features, constant_op.constant(
       ((0.8,), (0.,), (0.2,)), dtype=dtypes.float32)
Example n. 6
 def _eval_input_fn():
   feature_map = parsing_ops.parse_example(
       input_lib.limit_epochs(serialized_examples, num_epochs=1),
       feature_spec)
   features = linear_testing_utils.queue_parsed_features(feature_map)
   labels = features.pop('y')
   return features, labels
Example n. 7
 def _predict_input_fn():
   feature_map = parsing_ops.parse_example(
       input_lib.limit_epochs(serialized_examples, num_epochs=1),
       feature_spec)
   features = _queue_parsed_features(feature_map)
   features.pop('y')
   return features, None
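These _eval_input_fn/_predict_input_fn pairs are meant to be handed to an Estimator, where num_epochs=1 is what makes evaluate and predict terminate after a single pass over serialized_examples. A hedged sketch of the wiring; est stands in for whatever canned Estimator the surrounding test constructs and is not part of the snippets above:

# `est` is a hypothetical tf.estimator.Estimator built elsewhere in the test.
metrics = est.evaluate(input_fn=_eval_input_fn)  # stops at OutOfRangeError
predictions = list(est.predict(input_fn=_predict_input_fn))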
Example n. 8
def boston_input_fn(num_epochs=None):
    boston = base.load_boston()
    features = input_lib.limit_epochs(
        array_ops.reshape(
            constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
        num_epochs=num_epochs)
    labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
    return features, labels
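boston_input_fn defaults to num_epochs=None, and limit_epochs treats None as "no limit": in the TF 1.x implementation, as far as I can tell, it simply returns its input unchanged, so the same input_fn can serve unbounded training and, with num_epochs=1, single-pass evaluation. A quick sketch of that passthrough, assuming TF 1.x:

import tensorflow as tf

t = tf.constant([1.0, 2.0])
# With num_epochs=None no counter is created; the tensor passes through as-is.
assert tf.train.limit_epochs(t, num_epochs=None) is t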
Example n. 9
 def _eval_input_fn():
     feature_map = parsing_ops.parse_example(
         input_lib.limit_epochs(serialized_examples, num_epochs=1),
         feature_spec)
     features = _queue_parsed_features(feature_map)
     labels = features.pop('y')
     return features, labels
Example n. 10
 def _predict_input_fn():
   feature_map = parsing_ops.parse_example(
       input_lib.limit_epochs(serialized_examples, num_epochs=1),
       feature_spec)
   features = linear_testing_utils.queue_parsed_features(feature_map)
   features.pop('y')
   return features, None
Example n. 11
def boston_input_fn(num_epochs=None):
  boston = base.load_boston()
  features = input_lib.limit_epochs(
      array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
Example n. 12
 def _input_fn(num_epochs=None):
    # Create four rows: in one of them y == x; in the other three, y == not(x).
   labels = constant_op.constant([[1.], [0.], [0.], [0.]])
   features = {
       'x':
           input_lib.limit_epochs(
               array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
               num_epochs=num_epochs),
   }
   return features, labels
Example n. 13
  def testTrainWithEpochLimit(self):
    logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                          'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_inputs_limited = input_lib.limit_epochs(tf_inputs, num_epochs=300)
      tf_labels_limited = input_lib.limit_epochs(tf_labels, num_epochs=300)

      tf_predictions = LogisticClassifier(tf_inputs_limited)
      loss_ops.log_loss(tf_predictions, tf_labels_limited)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(train_op, logdir, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
    self.assertTrue(os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
    self.assertTrue(
        os.path.isfile('{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
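Note how the step count falls out: learning.train is called without number_of_steps, so training runs until limit_epochs raises OutOfRangeError. Since the whole input is consumed as one batch per step, 300 epochs means 300 steps, which is why the assertions look for the model.ckpt-300 checkpoint files.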
Example n. 14
  def testTrainWithEpochLimit(self):
    logdir = tempfile.mkdtemp('tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_inputs_limited = input_lib.limit_epochs(tf_inputs, num_epochs=300)
      tf_labels_limited = input_lib.limit_epochs(tf_labels, num_epochs=300)

      tf_predictions = LogisticClassifier(tf_inputs_limited)
      loss_ops.log_loss(tf_predictions, tf_labels_limited)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(train_op, logdir, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
    self.assertTrue(os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
    self.assertTrue(
        os.path.isfile('{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
Example n. 15
 def _input_fn():
     return (input_lib.limit_epochs(constant_op.constant(points),
                                    num_epochs=1), None)
Example n. 16
 def _predict_input_fn():
   features = parsing_ops.parse_example(
       input_lib.limit_epochs(serialized_examples, num_epochs=1),
       feature_spec)
   features.pop('label')
   return features, None
Example n. 17
 def _input_fn():
   return (input_lib.limit_epochs(
       constant_op.constant(points), num_epochs=1), None)
Example n. 18
 def _predict_input_fn():
     features = parsing_ops.parse_example(
         input_lib.limit_epochs(serialized_examples, num_epochs=1),
         feature_spec)
     features.pop('label')
     return features, None
Example n. 19
 def _eval_input_fn():
     features = parsing_ops.parse_example(
         input_lib.limit_epochs(serialized_examples, num_epochs=1),
         feature_spec)
     labels = features.pop('label')
     return features, labels
Example n. 20
 def _input_fn(instances):
     features = {
         'features':
             input_lib.limit_epochs(tf.constant(instances), num_epochs=1)
     }
     return features, None
Example n. 21
 def _eval_input_fn():
   features = parsing_ops.parse_example(
       input_lib.limit_epochs(serialized_examples, num_epochs=1),
       feature_spec)
   labels = features.pop('label')
   return features, labels
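Every parse_example variant above leans on two fixtures from the surrounding tests, serialized_examples and feature_spec. A hedged sketch of what such fixtures typically look like; the feature names and values here are hypothetical, not taken from the tests:

import tensorflow as tf

# Hypothetical stand-ins for the test fixtures used above.
feature_spec = {
    'x': tf.FixedLenFeature([1], dtype=tf.float32),
    'label': tf.FixedLenFeature([1], dtype=tf.float32),
}
example = tf.train.Example(features=tf.train.Features(feature={
    'x': tf.train.Feature(float_list=tf.train.FloatList(value=[1.0])),
    'label': tf.train.Feature(float_list=tf.train.FloatList(value=[0.0])),
}))
serialized_examples = [example.SerializeToString()] * 3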