Example 1
0
 def test_do_not_stop_if_checkpoint_is_not_there(self):
     """Hook should wait (sleep) rather than stop when the checkpoint for
     the requested last step has not been written to model_dir yet."""
     with ops.Graph().as_default():
         global_step = training.create_global_step()
         set_step_to_ten = global_step.assign(10)
         train_op = control_flow_ops.no_op()
         stop_hook = hooks_lib._StopAtCheckpointStepHook(
             model_dir=tempfile.mkdtemp(), last_step=10)
         with training.SingularMonitoredSession(hooks=[stop_hook]) as sess:
             # Advance the global step past last_step without writing a
             # checkpoint; the hook should then sleep instead of stopping.
             sess.raw_session().run(set_step_to_ten)
             with test.mock.patch.object(time, 'sleep') as mock_sleep:
                 sess.run(train_op)
                 self.assertTrue(mock_sleep.called)
             self.assertFalse(sess.should_stop())
Example 2
0
def _materialize_locally(tensors,
                         num_steps=1,
                         feed_dict=None,
                         safety_size=1e9):
    """Materialize the given tensors locally, during initialization.

    Assumes a non-distributed environment (uses SingularMonitoredSession).

    Args:
      tensors: tensors to be materialized: array or dict.
      num_steps: number of steps to run. Usually it's faster/easier to run in
        one step, a large batch. Set it to 0 or None to run until the queue is
        exhausted, when an OutOfRangeError exception is raised -- typically
        when an input_fn is set to run for a fixed num_epochs.
      feed_dict: optional feed_dict.
      safety_size: if num_steps is None and one created input_fn to loop
        indefinitely (num_epochs=None), this could loop consuming memory. This
        is a safety limit on memory to prevent that. Increase this if you
        actually need more than these many elements in your results, or set
        num_steps.

    Returns:
      Materialized tensors as array or dict, like the `tensors` arg.

    Raises:
      ValueError: for negative num_steps, or if the input is exhausted before
        a single element is materialized.
      errors.OutOfRangeError: if can't read num_steps times.
    """
    if num_steps and num_steps < 0:
        raise ValueError("can not run with num_steps=%s" % num_steps)

    # training.SingularMonitoredSession silently catches errors.OutOfRangeError,
    # and we want to expose it to the caller instead.
    error = None
    with training.SingularMonitoredSession() as sess:
        try:
            splits = []
            if not num_steps:
                # Run until queue exhausted.
                try:
                    count = 0
                    while True:
                        r = sess.run(tensors, feed_dict=feed_dict)
                        count += _get_size(r)
                        if count > safety_size:
                            raise ValueError(
                                "Unbound (num_steps=None) materialization of "
                                "input reached safety size of {}".format(
                                    safety_size))
                        splits.append(r)
                except errors.OutOfRangeError:
                    pass
            else:
                # Run num_steps times.
                splits = [
                    sess.run(tensors, feed_dict=feed_dict)
                    for _ in range(num_steps)
                ]
            if not splits:
                # Bug fix: previously an empty `splits` (queue exhausted
                # before yielding anything) crashed with an opaque IndexError
                # on splits[0]; raise a descriptive error instead.
                raise ValueError(
                    "input exhausted before any tensors were materialized")
            if isinstance(splits[0], dict):
                # Concatenate per key, preserving the dict structure.
                materialized = {}
                for k in splits[0]:
                    materialized[k] = np.concatenate(
                        [split[k] for split in splits])
            else:
                materialized = np.concatenate(splits)
        except (errors.OutOfRangeError, StopIteration) as ex:
            # Capture and re-raise outside the session context, since
            # SingularMonitoredSession would otherwise swallow it.
            error = ex
    if error:
        raise error  # pylint: disable=raising-bad-type
    return materialized
Example 3
0
 def _materialize_locally(self, tensors, feed_dict=None):
   """Run `tensors` once in a fresh local session and return the values.

   Args:
     tensors: tensors to evaluate (array or dict).
     feed_dict: optional feed_dict for the run.

   Returns:
     The materialized (concrete) values of `tensors`.
   """
   with training.SingularMonitoredSession() as sess:
     return sess.run(tensors, feed_dict=feed_dict)