Example 1
    def test_train_max_steps_is_not_incremental(self):
        with tf.Graph().as_default() as g, self.test_session(g):
            with tf.control_dependencies(self._build_inference_graph()):
                train_op = tf.assign_add(
                    tf.contrib.framework.get_global_step(), 1)
            learn.graph_actions.train(g,
                                      output_dir=self._output_dir,
                                      train_op=train_op,
                                      loss_op=tf.constant(2.0),
                                      max_steps=10)
            step = checkpoints.load_variable(
                self._output_dir,
                tf.contrib.framework.get_global_step().name)
            self.assertEqual(10, step)

        with tf.Graph().as_default() as g, self.test_session(g):
            with tf.control_dependencies(self._build_inference_graph()):
                train_op = tf.assign_add(
                    tf.contrib.framework.get_global_step(), 1)
            learn.graph_actions.train(g,
                                      output_dir=self._output_dir,
                                      train_op=train_op,
                                      loss_op=tf.constant(2.0),
                                      max_steps=15)
            step = checkpoints.load_variable(
                self._output_dir,
                tf.contrib.framework.get_global_step().name)
            self.assertEqual(15, step)
Example 2
 def weights_(self):
   hiddenlayer_weights = [checkpoints.load_variable(
       self._model_dir, name=("dnn/hiddenlayer_%d/weights" % i))
                          for i, _ in enumerate(self._hidden_units)]
   logits_weights = [checkpoints.load_variable(
       self._model_dir, name="dnn/logits/weights")]
   return hiddenlayer_weights + logits_weights
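The weights_ accessor above is exposed as a property on the contrib DNN estimators (the @property decorator is not visible in the snippet). A minimal, hypothetical usage sketch; `classifier` and its hidden_units are assumptions, not taken from the source:

    # Hypothetical: `classifier` is a trained tf.contrib.learn DNN estimator
    # built with hidden_units=[10, 20].
    weights = classifier.weights_
    # One numpy array per hidden layer, followed by the logits weights.
    for w in weights:
        print(w.shape)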
Example 3
    def test_train_skip_train_if_max_step_already_saved(self):
        with tf.Graph().as_default() as g, self.test_session(g):
            with tf.control_dependencies(self._build_inference_graph()):
                train_op = tf.assign_add(
                    tf.contrib.framework.get_global_step(), 1)
            learn.graph_actions._supervised_train(  # pylint: disable=protected-access
                g,
                output_dir=self._output_dir,
                train_op=train_op,
                loss_op=tf.constant(2.0),
                max_steps=10)
            step = checkpoints.load_variable(
                self._output_dir,
                tf.contrib.framework.get_global_step().name)
            self.assertEqual(10, step)

        with tf.Graph().as_default() as g, self.test_session(g):
            with tf.control_dependencies(self._build_inference_graph()):
                train_op = tf.assign_add(
                    tf.contrib.framework.get_global_step(), 1)
            learn.graph_actions._supervised_train(  # pylint: disable=protected-access
                g,
                output_dir=self._output_dir,
                train_op=train_op,
                loss_op=tf.constant(2.0),
                max_steps=10)
            step = checkpoints.load_variable(
                self._output_dir,
                tf.contrib.framework.get_global_step().name)
            self.assertEqual(10, step)
Example 4
  def test_train_skip_train_if_max_step_already_saved(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)
Example 5
 def bias_(self):
     hiddenlayer_bias = [
         checkpoints.load_variable(self._model_dir, name=("dnn/hiddenlayer_%d/biases" % i))
         for i, _ in enumerate(self._hidden_units)
     ]
     logits_bias = [checkpoints.load_variable(self._model_dir, name="dnn/logits/biases")]
     centered_bias = [checkpoints.load_variable(self._model_dir, name=_CENTERED_BIAS_WEIGHT)]
     return hiddenlayer_bias + logits_bias + centered_bias
Example 6
 def testGetTensor(self):
   checkpoint_dir = self.get_temp_dir()
   with self.test_session() as session:
     v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var1"), v1)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var2"), v2)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var3"), v3)
   self.assertAllEqual(
       checkpoints.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
Example 7
 def testGetTensor(self):
   checkpoint_dir = self.get_temp_dir()
   with self.test_session() as session:
     v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var1"), v1)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var2"), v2)
   self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var3"), v3)
   self.assertAllEqual(
       checkpoints.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
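The two checkpoint utilities exercised by this test can also be used directly to inspect a checkpoint directory. A minimal sketch; the import path reflects the old contrib layout these examples come from and may differ across TensorFlow versions, and `checkpoint_dir` plus the variable name are placeholders:

    from tensorflow.contrib.learn.python.learn.utils import checkpoints

    checkpoint_dir = "/tmp/my_model"  # hypothetical directory with a saved checkpoint

    # (name, shape) pairs for every variable stored in the checkpoint.
    for name, shape in checkpoints.list_variables(checkpoint_dir):
        print(name, shape)

    # Load a single variable as a numpy array.
    var1 = checkpoints.load_variable(checkpoint_dir, "var1")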
Example 8
 def weights_(self):
     hiddenlayer_weights = [
         checkpoints.load_variable(self._model_dir,
                                   name=("dnn/hiddenlayer_%d/weights" % i))
         for i, _ in enumerate(self._hidden_units)
     ]
     logits_weights = [
         checkpoints.load_variable(self._model_dir,
                                   name="dnn/logits/weights")
     ]
     return hiddenlayer_weights + logits_weights
Example 9
  def get_bias(self, model_dir):
    """Returns the bias of the model.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The bias weights created by this model.
    """
    return [checkpoints.load_variable(
        model_dir, name=(self._scope+"/hiddenlayer_%d/biases" % i))
            for i, _ in enumerate(self._hidden_units)] + [
                checkpoints.load_variable(
                    model_dir, name=(self._scope+"/logits/biases"))]
Example 10
 def bias_(self):
     hiddenlayer_bias = [
         checkpoints.load_variable(self._model_dir,
                                   name=("dnn/hiddenlayer_%d/biases" % i))
         for i, _ in enumerate(self._hidden_units)
     ]
     logits_bias = [
         checkpoints.load_variable(self._model_dir,
                                   name="dnn/logits/biases")
     ]
     centered_bias = [
         checkpoints.load_variable(self._model_dir,
                                   name=_CENTERED_BIAS_WEIGHT)
     ]
     return hiddenlayer_bias + logits_bias + centered_bias
Example 11
def print_tensors_in_checkpoint_file(file_name, tensor_name):
  """Prints tensors in a checkpoint file.

  If no `tensor_name` is provided, prints the tensor names and shapes
  in the checkpoint file.

  If `tensor_name` is provided, prints the content of the tensor.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
  try:
    if not tensor_name:
      variables = checkpoints.list_variables(file_name)
      for name, shape in variables:
        print("%s\t%s" % (name, str(shape)))
    else:
      print("tensor_name: ", tensor_name)
      print(checkpoints.load_variable(file_name, tensor_name))
  except Exception as e:  # pylint: disable=broad-except
    print(str(e))
    if "corrupted compressed block contents" in str(e):
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")
Example 12
 def testNoTensor(self):
     checkpoint_dir = self.get_temp_dir()
     with self.test_session() as session:
         _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
     with self.assertRaises(tf.errors.OpError):
         self.assertAllEqual(
             checkpoints.load_variable(checkpoint_dir, "var5"), [])
Example 13
def print_tensors_in_checkpoint_file(file_name, tensor_name):
    """Prints tensors in a checkpoint file.

  If no `tensor_name` is provided, prints the tensor names and shapes
  in the checkpoint file.

  If `tensor_name` is provided, prints the content of the tensor.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
    try:
        if not tensor_name:
            variables = checkpoints.list_variables(file_name)
            for name, shape in variables:
                print("%s\t%s" % (name, str(shape)))
        else:
            print("tensor_name: ", tensor_name)
            print(checkpoints.load_variable(file_name, tensor_name))
    except Exception as e:  # pylint: disable=broad-except
        print(str(e))
        if "corrupted compressed block contents" in str(e):
            print("It's likely that your checkpoint file has been compressed "
                  "with SNAPPY.")
Example 14
    def get_bias(self, model_dir):
        """Returns the bias of the model.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The bias weights created by this model.
    """
        return [
            checkpoints.load_variable(
                model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
            for i, _ in enumerate(self._hidden_units)
        ] + [
            checkpoints.load_variable(model_dir,
                                      name=(self._scope + "/logits/biases"))
        ]
Example 15
 def weights_(self):
     values = {}
     optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
     for name, _ in checkpoints.list_variables(self._model_dir):
         if name.startswith("linear/") and name != "linear/bias_weight" and not re.match(optimizer_regex, name):
             values[name] = checkpoints.load_variable(self._model_dir, name)
     if len(values) == 1:
         return values[list(values.keys())[0]]
     return values
Example 16
 def weights_(self):
     values = {}
     optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
     for name, _ in checkpoints.list_variables(self._model_dir):
         if (name.startswith("linear/") and name != "linear/bias_weight"
                 and not re.match(optimizer_regex, name)):
             values[name] = checkpoints.load_variable(self._model_dir, name)
     if len(values) == 1:
         return values[list(values.keys())[0]]
     return values
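To make the filtering above concrete, here is a small self-contained sketch of the same regex logic, assuming an Ftrl optimizer (so optimizer.get_name() would return "Ftrl") and hypothetical variable names:

    import re

    optimizer_regex = r".*/" + "Ftrl" + r"(_\d)?$"
    names = [
        "linear/age/weight",         # kept: a feature weight
        "linear/age/weight/Ftrl",    # dropped: optimizer slot variable
        "linear/age/weight/Ftrl_1",  # dropped: optimizer slot variable
        "linear/bias_weight",        # dropped: excluded explicitly
    ]
    kept = [n for n in names
            if n.startswith("linear/") and n != "linear/bias_weight"
            and not re.match(optimizer_regex, n)]
    assert kept == ["linear/age/weight"]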
Example 17
    def get_variable_value(self, name):
        """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
        return checkpoints.load_variable(self.model_dir, name)
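A hypothetical usage sketch of this accessor; `estimator` and the variable name are assumptions:

    # Assuming `estimator` is a trained tf.contrib.learn estimator whose
    # checkpoint contains a "global_step" variable, this returns its value
    # loaded from model_dir as a numpy value.
    step = estimator.get_variable_value("global_step")
    print("trained for %d steps" % step)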
Example 18
  def test_train_max_steps_is_not_incremental(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=15)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)
Example 19
    def get_bias(self, model_dir):
        """Returns bias of the model.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The bias weights created by this model.
    """
        return checkpoints.load_variable(model_dir,
                                         name=(self._scope + "/bias_weight"))
Example 20
  def get_bias(self, model_dir):
    """Returns bias of the model.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The bias weights created by this model.
    """
    return checkpoints.load_variable(model_dir,
                                     name=(self._scope+"/bias_weight"))
Example 21
  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    if name.endswith(':0'):
      name = name[:-2]
    return checkpoints.load_variable(self.model_dir, name)
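The ':0' handling above reflects the difference between graph tensor names and checkpoint keys: a variable's .name in the graph carries an output-index suffix (for example "global_step:0"), while the checkpoint stores the name without it. A tiny sketch of the normalization, using an assumed name:

    name = "linear/bias_weight:0"   # graph-style name (hypothetical)
    if name.endswith(':0'):
        name = name[:-2]            # -> "linear/bias_weight", the checkpoint key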
Example 22
    def get_tensor_value(self, name):
        """Returns value of the tensor give by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
        if name.endswith(':0'):
            name = name[:-2]
        return checkpoints.load_variable(self.model_dir, name)
Example 23
  def get_weights(self, model_dir):
    """Returns weights per feature of the linear part.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The weights created by this model (without the optimizer weights).
    """
    all_variables = [name for name, _ in checkpoints.list_variables(model_dir)]
    values = {}
    optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
    for name in all_variables:
      if (name.startswith(self._scope + "/") and
          name != self._scope + "/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = checkpoints.load_variable(model_dir, name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
Example 24
    def get_weights(self, model_dir):
        """Returns weights per feature of the linear part.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The weights created by this model (without the optimizer weights).
    """
        all_variables = [
            name for name, _ in checkpoints.list_variables(model_dir)
        ]
        values = {}
        optimizer_regex = r".*/" + self._get_optimizer().get_name(
        ) + r"(_\d)?$"
        for name in all_variables:
            if (name.startswith(self._scope + "/")
                    and name != self._scope + "/bias_weight"
                    and not re.match(optimizer_regex, name)):
                values[name] = checkpoints.load_variable(model_dir, name)
        if len(values) == 1:
            return values[list(values.keys())[0]]
        return values
Example 25
 def clusters(self):
   """Returns cluster centers."""
   return checkpoints.load_variable(self.model_dir, self.CLUSTERS)
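A hypothetical usage sketch; `kmeans` is assumed to be a trained tf.contrib.learn KMeansClustering estimator, and self.CLUSTERS above names the clusters variable stored in its checkpoint:

    centers = kmeans.clusters()   # numpy array of cluster centers
    print(centers.shape)          # e.g. (num_clusters, num_features)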
Example 26
def _train_internal(graph,
                    output_dir,
                    train_op,
                    loss_op,
                    global_step_tensor,
                    init_op,
                    init_feed_dict,
                    init_fn,
                    log_every_steps,
                    supervisor_is_chief,
                    supervisor_master,
                    supervisor_save_model_secs,
                    keep_checkpoint_max,
                    supervisor_save_summaries_steps,
                    feed_fn,
                    steps,
                    fail_on_nan_loss,
                    monitors,
                    max_steps):
  """See train."""
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = checkpoints.load_variable(
          output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]

    if max_steps is None:
      max_steps = (start_step + steps) if steps else None
    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

  supervisor = tf_supervisor.Supervisor(
      graph,
      init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
      init_feed_dict=init_feed_dict,
      is_chief=supervisor_is_chief,
      logdir=output_dir,
      saver=_make_saver(graph, keep_checkpoint_max),
      global_step=global_step_tensor,
      summary_op=None,
      summary_writer=summary_writer,
      save_model_secs=supervisor_save_model_secs,
      init_fn=init_fn)
  session = supervisor.PrepareSession(master=supervisor_master,
                                      start_standard_services=True)
  supervisor.StartQueueRunners(session)

  with session:
    get_current_step = lambda: session.run(global_step_tensor)

    start_step = get_current_step()
    last_step = start_step
    last_log_step = start_step
    loss_value = None
    logging.info('Training steps [%d,%s)', last_step, 'inf'
                 if max_steps is None else str(max_steps))

    excinfo = None
    try:
      while not supervisor.ShouldStop() and (
          (max_steps is None) or (last_step < max_steps)):
        start_time = time.time()
        feed_dict = feed_fn() if feed_fn is not None else None

        outputs, should_stop = _run_with_monitors(
            session, last_step + 1, [train_op, loss_op], feed_dict, monitors)

        loss_value = outputs[loss_op.name]
        if np.isnan(loss_value):
          failure_message = 'Model diverged with loss = NaN.'
          if fail_on_nan_loss:
            logging.error(failure_message)
            raise NanLossDuringTrainingError()
          else:
            logging.warning(failure_message)

        if should_stop:
          break

        this_step = get_current_step()

        if this_step <= last_step:
          logging.error(
              'Global step was not incremented by train op at step %s'
              ': new step %d', last_step, this_step)

        last_step = this_step
        is_last_step = (max_steps is not None) and (last_step >= max_steps)
        if is_last_step or (last_step - last_log_step >= log_every_steps):
          logging.info(
              'training step %d, loss = %.5f (%.3f sec/batch).',
              last_step, loss_value, float(time.time() - start_time))
          last_log_step = last_step
    except errors.OutOfRangeError as e:
      logging.warn('Got exception during tf.learn training loop possibly '
                   'due to exhausted input queue %s.', e)
    except StopIteration:
      logging.info('Exhausted input iterator.')
    except BaseException as e:  # pylint: disable=broad-except
      # Hold on to any other exceptions while we try recording a final
      # checkpoint and summary.
      excinfo = sys.exc_info()
    finally:
      try:
        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop(close_summary_writer=False)

        # Save one last checkpoint and summaries
        # TODO(wicke): This should be handled by Supervisor

        # In case we encountered an exception in the try block before we updated
        # last_step, update it here (again).
        last_step = get_current_step()
        if supervisor_is_chief:
          ckpt_path = supervisor.save_path
          logging.info('Saving checkpoint for step %d to checkpoint: %s.',
                       last_step, ckpt_path)
          supervisor.saver.save(session, ckpt_path, global_step=last_step)

          # Finish monitors.
          for monitor in monitors:
            monitor.end()

      # catch OutOfRangeError which is thrown when queue is out of data (and for
      # other reasons as well).
      except errors.OutOfRangeError as e:
        logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
                     'due to exhausted input queue. Note: summary_op is not '
                     'expected to trigger dequeues. %s.', e)
      except BaseException as e:  # pylint: disable=broad-except
        # If we don't already have an exception to re-raise, raise this one.
        if not excinfo:
          raise
        # Otherwise, log this one and raise the other in the finally block.
        logging.error('Got exception during tf.learn final checkpoint %s.', e)
      finally:
        if excinfo:
          reraise(*excinfo)
    return loss_value
Example 27
def _supervised_train(graph,
                      output_dir,
                      train_op,
                      loss_op,
                      global_step_tensor=None,
                      init_op=None,
                      init_feed_dict=None,
                      init_fn=None,
                      log_every_steps=10,
                      supervisor_is_chief=True,
                      supervisor_master='',
                      supervisor_save_model_steps=1000,
                      keep_checkpoint_max=5,
                      supervisor_save_summaries_steps=100,
                      feed_fn=None,
                      steps=None,
                      fail_on_nan_loss=True,
                      monitors=None,
                      max_steps=None):
  """Train a model via supervised_session.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training. It is
  expected to increment the `global_step_tensor`, a scalar integer tensor
  counting training steps. This function uses `Supervisor` to initialize the
  graph (from a checkpoint if one is available in `output_dir`), write summaries
  defined in the graph, and write regular checkpoints as defined by
  `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_steps: Save a checkpoint every
      `supervisor_save_model_steps` steps when training.
    keep_checkpoint_max: The maximum number of recent checkpoint files to
      keep. As new files are created, older files are deleted. If None or 0,
      all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to tf.Saver constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train the model. If `None`,
      train forever. Two calls to fit(steps=100) mean 200 training iterations.
      On the other hand, two calls to fit(max_steps=100) mean that the second
      call will not do any iterations, since the first call already did all
      100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
      is not provided. See `tf.contrib.framework.get_global_step` for how we
      look up the latter if not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if monitors is None:
    monitors = []
  if not isinstance(monitors, list):
    raise ValueError('Monitors should be a list.')
  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = checkpoints.load_variable(output_dir,
                                             global_step_tensor.name)
      if max_steps <= start_step:
        logging.info(
            'Skipping training since max_steps has already been saved.')
        return None
    except:  # pylint: disable=bare-except
      pass

  with graph.as_default():
    # See question about adding the summary writer to the scaffold.
    if supervisor_is_chief:
      summary_writer = summary_writer_cache.SummaryWriterCache.get(output_dir)
      monitors.extend([
          monitors_lib.StepCounter(summary_writer=summary_writer),
          monitors_lib.NanLoss(loss_op,
                               fail_on_nan_loss=fail_on_nan_loss),
          monitors_lib.PrintTensor({'loss': loss_op.name},
                                   every_n=log_every_steps),
      ])

    # Finalize graph and add savers
    # TODO(ispir): remove keep_checkpoint_max from Scaffold interface
    scaffold = supervised_session.Scaffold(
        global_step_tensor=global_step_tensor,
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        keep_checkpoint_max=keep_checkpoint_max)
    if supervisor_is_chief:
      if scaffold.summary_op is not None:
        monitors.append(monitors_lib.SummarySaver(
            scaffold.summary_op,
            save_steps=supervisor_save_summaries_steps,
            summary_writer=summary_writer))
      if supervisor_save_model_steps > 0:
        monitors.append(
            monitors_lib.CheckpointSaver(supervisor_save_model_steps,
                                         scaffold.saver, output_dir))

    if steps is not None or max_steps is not None:
      monitors.append(monitors_lib.StopAtStep(steps, max_steps))
    if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]

    with supervised_session.SupervisedSession(supervisor_master,
                                              is_chief=supervisor_is_chief,
                                              checkpoint_dir=output_dir,
                                              monitors=monitors,
                                              scaffold=scaffold) as super_sess:
      loss = None
      while not super_sess.should_stop():
        _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else
                                 None)
      return loss
Example 28
def train(graph,
          output_dir,
          train_op,
          loss_op,
          global_step_tensor=None,
          init_op=None,
          init_feed_dict=None,
          init_fn=None,
          log_every_steps=10,
          supervisor_is_chief=True,
          supervisor_master='',
          supervisor_save_model_secs=600,
          supervisor_save_summaries_steps=100,
          feed_fn=None,
          steps=None,
          fail_on_nan_loss=True,
          monitors=None):
  """Train a model.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training. It is
  expected to increment the `global_step_tensor`, a scalar integer tensor
  counting training steps. This function uses `Supervisor` to initialize the
  graph (from a checkpoint if one is available in `output_dir`), write summaries
  defined in the graph, and write regular checkpoints as defined by
  `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save a checkpoint every
      `supervisor_save_model_secs` seconds when training.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `global_step_tensor` is not provided. See
        `tf.contrib.framework.get_global_step` for how we look it up if not
        provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
        evaluates to `NaN`.
  """
  if not output_dir:
    raise ValueError('Output directory should be non-empty.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = checkpoints.load_variable(
          output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.
    if not supervisor_is_chief:
      # monitors should run only on the chief.
      monitors = []
    elif not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer)

    max_steps = (start_step + steps) if steps else None
    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

  supervisor = tf_supervisor.Supervisor(
      graph,
      init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
      init_feed_dict=init_feed_dict,
      is_chief=supervisor_is_chief,
      logdir=output_dir,
      saver=_make_saver(graph),
      global_step=global_step_tensor,
      summary_op=None,
      summary_writer=summary_writer,
      save_model_secs=supervisor_save_model_secs,
      init_fn=init_fn)
  session = supervisor.PrepareSession(master=supervisor_master,
                                      start_standard_services=True)
  supervisor.StartQueueRunners(session)

  with session:
    get_current_step = lambda: session.run(global_step_tensor)

    start_step = get_current_step()
    last_step = start_step
    last_log_step = start_step
    loss_value = None
    logging.info('Training steps [%d,%s)', last_step, 'inf'
                 if max_steps is None else str(max_steps))

    excinfo = None
    try:
      while not supervisor.ShouldStop() and (
          (max_steps is None) or (last_step < max_steps)):
        start_time = time.time()
        feed_dict = feed_fn() if feed_fn is not None else None

        outputs, should_stop = _run_with_monitors(
            session, last_step + 1, [train_op, loss_op], feed_dict, monitors)

        loss_value = outputs[loss_op.name]
        if np.isnan(loss_value):
          failure_message = 'Model diverged with loss = NaN.'
          if fail_on_nan_loss:
            logging.error(failure_message)
            raise NanLossDuringTrainingError()
          else:
            logging.warning(failure_message)

        if should_stop:
          break

        this_step = get_current_step()

        if this_step <= last_step:
          logging.error(
              'Global step was not incremented by train op at step %s'
              ': new step %d', last_step, this_step)

        last_step = this_step
        is_last_step = (max_steps is not None) and (last_step >= max_steps)
        if is_last_step or (last_step - last_log_step >= log_every_steps):
          logging.info(
              'training step %d, loss = %.5f (%.3f sec/batch).',
              last_step, loss_value, float(time.time() - start_time))
          last_log_step = last_step
    except errors.OutOfRangeError as e:
      logging.warn('Got exception during tf.learn training loop possibly '
                   'due to exhausted input queue %s.', e)
    except BaseException as e:  # pylint: disable=broad-except
      # Hold on to any other exceptions while we try recording a final
      # checkpoint and summary.
      excinfo = sys.exc_info()
    finally:
      try:
        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop(close_summary_writer=False)

        # Save one last checkpoint and summaries
        # TODO(wicke): This should be handled by Supervisor

        # In case we encountered an exception in the try block before we updated
        # last_step, update it here (again).
        last_step = get_current_step()
        if supervisor_is_chief:
          ckpt_path = supervisor.save_path
          logging.info('Saving checkpoint for step %d to checkpoint: %s.',
                       last_step, ckpt_path)
          supervisor.saver.save(session, ckpt_path, global_step=last_step)

          # Finish monitors.
          for monitor in monitors:
            monitor.end()

      # catch OutOfRangeError which is thrown when queue is out of data (and for
      # other reasons as well).
      except errors.OutOfRangeError as e:
        logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
                     'due to exhausted input queue. Note: summary_op is not '
                     'expected to trigger dequeues. %s.', e)
      except BaseException as e:  # pylint: disable=broad-except
        # If we don't already have an exception to re-raise, raise this one.
        if not excinfo:
          raise
        # Otherwise, log this one and raise the other in the finally block.
        logging.error('Got exception during tf.learn final checkpoint %s.', e)
      finally:
        if excinfo:
          reraise(*excinfo)
    return loss_value
Example 29
 def bias_(self):
   return checkpoints.load_variable(self._model_dir,
                                    name="linear/bias_weight")
Example 30
def train(graph,
          output_dir,
          train_op,
          loss_op,
          global_step_tensor=None,
          init_op=None,
          init_feed_dict=None,
          init_fn=None,
          log_every_steps=10,
          supervisor_is_chief=True,
          supervisor_master='',
          supervisor_save_model_secs=600,
          supervisor_save_summaries_steps=100,
          feed_fn=None,
          steps=None,
          fail_on_nan_loss=True,
          monitors=None):
    """Train a model.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training. It is
  expected to increment the `global_step_tensor`, a scalar integer tensor
  counting training steps. This function uses `Supervisor` to initialize the
  graph (from a checkpoint if one is available in `output_dir`), write summaries
  defined in the graph, and write regular checkpoints as defined by
  `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save a checkpoint every
      `supervisor_save_model_secs` seconds when training.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `global_step_tensor` is not provided. See
        `tf.contrib.framework.get_global_step` for how we look it up if not
        provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
        evaluates to `NaN`.
  """
    if not output_dir:
        raise ValueError('Output directory should be non-empty.')

    with graph.as_default():
        global_step_tensor = contrib_variables.assert_or_get_global_step(
            graph, global_step_tensor)
        if global_step_tensor is None:
            raise ValueError(
                'No "global_step" was provided or found in the graph.')

        # Get current step.
        try:
            start_step = checkpoints.load_variable(output_dir,
                                                   global_step_tensor.name)
        except (errors.NotFoundError, ValueError):
            start_step = 0

        summary_writer = (get_summary_writer(output_dir)
                          if supervisor_is_chief else None)

        # TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.
        if not supervisor_is_chief:
            # monitors should run only on the chief.
            monitors = []
        elif not monitors:
            monitors = monitors_lib.get_default_monitors(
                loss_op=loss_op,
                summary_op=logging_ops.get_summary_op(),
                save_summary_steps=supervisor_save_summaries_steps,
                summary_writer=summary_writer)

        # Start monitors, can create graph parts.
        for monitor in monitors:
            monitor.begin(max_steps=start_step + steps)

    supervisor = tf_supervisor.Supervisor(
        graph,
        init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
        init_feed_dict=init_feed_dict,
        is_chief=supervisor_is_chief,
        logdir=output_dir,
        saver=_make_saver(graph),
        global_step=global_step_tensor,
        summary_op=None,
        summary_writer=summary_writer,
        save_model_secs=supervisor_save_model_secs,
        init_fn=init_fn)
    session = supervisor.PrepareSession(master=supervisor_master,
                                        start_standard_services=True)
    supervisor.StartQueueRunners(session)

    with session:
        get_current_step = lambda: session.run(global_step_tensor)

        start_step = get_current_step()
        max_steps = start_step + steps
        last_step = start_step
        last_log_step = start_step
        loss_value = None
        logging.info('Training steps [%d,%s)', last_step,
                     'inf' if max_steps is None else str(max_steps))

        excinfo = None
        try:
            while not supervisor.ShouldStop() and ((max_steps is None) or
                                                   (last_step < max_steps)):
                start_time = time.time()
                feed_dict = feed_fn() if feed_fn is not None else None

                outputs, should_stop = _run_with_monitors(
                    session, last_step + 1, [train_op, loss_op], feed_dict,
                    monitors)

                loss_value = outputs[loss_op.name]
                if np.isnan(loss_value):
                    failure_message = 'Model diverged with loss = NaN.'
                    if fail_on_nan_loss:
                        logging.error(failure_message)
                        raise NanLossDuringTrainingError()
                    else:
                        logging.warning(failure_message)

                if should_stop:
                    break

                this_step = get_current_step()

                if this_step <= last_step:
                    logging.error(
                        'Global step was not incremented by train op at step %s'
                        ': new step %d', last_step, this_step)

                last_step = this_step
                is_last_step = (max_steps
                                is not None) and (last_step >= max_steps)
                if is_last_step or (last_step - last_log_step >=
                                    log_every_steps):
                    logging.info(
                        'training step %d, loss = %.5f (%.3f sec/batch).',
                        last_step, loss_value, float(time.time() - start_time))
                    last_log_step = last_step
        except errors.OutOfRangeError as e:
            logging.warn(
                'Got exception during tf.learn training loop possibly '
                'due to exhausted input queue %s.', e)
        except BaseException as e:  # pylint: disable=broad-except
            # Hold on to any other exceptions while we try recording a final
            # checkpoint and summary.
            excinfo = sys.exc_info()
        finally:
            try:
                # Call supervisor.Stop() from within a try block because it re-raises
                # exceptions thrown by the supervised threads.
                supervisor.Stop(close_summary_writer=False)

                # Save one last checkpoint and summaries
                # TODO(wicke): This should be handled by Supervisor

                # In case we encountered an exception in the try block before we updated
                # last_step, update it here (again).
                last_step = get_current_step()
                if supervisor_is_chief:
                    ckpt_path = supervisor.save_path
                    logging.info(
                        'Saving checkpoint for step %d to checkpoint: %s.',
                        last_step, ckpt_path)
                    supervisor.saver.save(session,
                                          ckpt_path,
                                          global_step=last_step)

                    # Finish monitors.
                    for monitor in monitors:
                        monitor.end()

            # catch OutOfRangeError which is thrown when queue is out of data (and for
            # other reasons as well).
            except errors.OutOfRangeError as e:
                logging.warn(
                    'OutOfRangeError in tf.learn final checkpoint possibly '
                    'due to exhausted input queue. Note: summary_op is not '
                    'expected to trigger dequeues. %s.', e)
            except BaseException as e:  # pylint: disable=broad-except
                # If we don't already have an exception to re-raise, raise this one.
                if not excinfo:
                    raise
                # Otherwise, log this one and raise the other in the finally block.
                logging.error(
                    'Got exception during tf.learn final checkpoint %s.', e)
            finally:
                if excinfo:
                    reraise(*excinfo)
        return loss_value
Example 31
 def clusters(self):
     """Returns cluster centers."""
     return checkpoints.load_variable(self.model_dir, self.CLUSTERS)
Example 32
 def get_variable_value(self, name):
   return checkpoints.load_variable(self.model_dir, name)
Example 33
 def testNoTensor(self):
   checkpoint_dir = self.get_temp_dir()
   with self.test_session() as session:
     _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
   with self.assertRaises(tf.errors.OpError):
     self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var5"), [])
Example 34
 def testNoCheckpoints(self):
     checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
     with self.assertRaises(tf.errors.OpError):
         self.assertAllEqual(
             checkpoints.load_variable(checkpoint_dir, "var1"), [])
Example 35
 def clusters(self):
   """Returns cluster centers."""
   clusters = checkpoints.load_variable(self.model_dir,
                                        gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
   return np.squeeze(clusters, 1)
Example 36
 def covariances(self):
   """Returns the covariances."""
   return checkpoints.load_variable(
       self.model_dir,
       gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
Example 37
def _monitored_train(graph,
                     output_dir,
                     train_op,
                     loss_op,
                     global_step_tensor=None,
                     init_op=None,
                     init_feed_dict=None,
                     init_fn=None,
                     log_every_steps=10,
                     supervisor_is_chief=True,
                     supervisor_master='',
                     supervisor_save_model_secs=600,
                     keep_checkpoint_max=5,
                     supervisor_save_summaries_steps=100,
                     feed_fn=None,
                     steps=None,
                     fail_on_nan_loss=True,
                     hooks=None,
                     max_steps=None):
  """Train a model via monitored_session.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training. It is
  expected to increment the `global_step_tensor`, a scalar integer tensor
  counting training steps. This function uses `Supervisor` to initialize the
  graph (from a checkpoint if one is available in `output_dir`), write summaries
  defined in the graph, and write regular checkpoints as defined by
  `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save model every
      `supervisor_save_model_secs` seconds when training.
    keep_checkpoint_max: The maximum number of recent checkpoint files to
      keep. As new files are created, older files are deleted. If None or 0,
      all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to tf.Saver constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    hooks: List of `SessionRunHook` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train the model. If `None`,
      train forever. Two calls to fit(steps=100) mean 200 training iterations.
      On the other hand, two calls to fit(max_steps=100) mean that the second
      call will not do any iterations, since the first call already did all
      100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
      is not provided. See `tf.contrib.framework.get_global_step` for how we
      look up the latter if not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if hooks is None:
    hooks = []
  if not isinstance(hooks, list):
    raise ValueError('Hooks should be a list.')
  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = checkpoints.load_variable(output_dir,
                                             global_step_tensor.name)
      if max_steps <= start_step:
        logging.info(
            'Skipping training since max_steps has already been saved.')
        return None
    except:  # pylint: disable=bare-except
      pass

  # Adapted SessionRunHooks such as ExportMonitor depend on the
  # CheckpointSaverHook having run before they run themselves.
  # The `hooks` param comprises deprecated monitor hooks
  # (such as ExportMonitor), so they are appended after the
  # basic_session_run_hooks.
  all_hooks = []
  with graph.as_default():
    all_hooks.extend([
        basic_session_run_hooks.NanTensorHook(
            loss_op, fail_on_nan_loss=fail_on_nan_loss),
        basic_session_run_hooks.LoggingTensorHook({
            'loss': loss_op.name,
            'step': global_step_tensor.name
        }, every_n_iter=log_every_steps),
    ])

    scaffold = monitored_session.Scaffold(
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        saver=tf_saver.Saver(
            sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True))

    if not supervisor_is_chief:
      session_creator = monitored_session.WorkerSessionCreator(
          scaffold=scaffold,
          master=supervisor_master)
    else:
      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=scaffold,
          checkpoint_dir=output_dir,
          master=supervisor_master)
      summary_writer = summary_writer_cache.SummaryWriterCache.get(output_dir)
      all_hooks.append(
          basic_session_run_hooks.StepCounterHook(
              summary_writer=summary_writer))
      all_hooks.append(
          basic_session_run_hooks.SummarySaverHook(
              save_steps=supervisor_save_summaries_steps,
              summary_writer=summary_writer,
              scaffold=scaffold))
      if supervisor_save_model_secs > 0:
        all_hooks.append(
            basic_session_run_hooks.CheckpointSaverHook(
                output_dir,
                save_secs=supervisor_save_model_secs,
                scaffold=scaffold))

    if steps is not None or max_steps is not None:
      all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    all_hooks.extend(hooks)

    with monitored_session.MonitoredSession(
        session_creator=session_creator,
        hooks=all_hooks) as super_sess:
      loss = None
      while not super_sess.should_stop():
        _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else
                                 None)
      return loss
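
The `steps` versus `max_steps` behaviour described in the docstring above reduces to whether the stop point is relative to the restored global step or absolute. A minimal, dependency-free sketch of that bookkeeping (the function name and values are illustrative, not part of the library):

def resolve_stop_step(start_step, steps=None, max_steps=None):
  """Return the global step at which training should stop, or None."""
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if max_steps is not None:
    return max_steps               # absolute: independent of the checkpoint
  if steps is not None:
    return start_step + steps      # relative: advances past the checkpoint
  return None                      # train forever

# Resuming from a checkpoint whose saved global step is 100:
assert resolve_stop_step(100, steps=100) == 200      # a second fit(steps=100) keeps training
assert resolve_stop_step(100, max_steps=100) == 100  # a second fit(max_steps=100) is a no-op
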
Ejemplo n.º 38
0
def _train_internal(graph,
                    output_dir,
                    train_op,
                    loss_op,
                    global_step_tensor,
                    init_op,
                    init_feed_dict,
                    init_fn,
                    log_every_steps,
                    supervisor_is_chief,
                    supervisor_master,
                    supervisor_save_model_secs,
                    keep_checkpoint_max,
                    supervisor_save_summaries_steps,
                    feed_fn,
                    steps,
                    fail_on_nan_loss,
                    monitors,
                    max_steps):
  """See train."""
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError(
        'Output directory should be non-empty; got %r.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = checkpoints.load_variable(
          output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]

    if max_steps is None:
      max_steps = (start_step + steps) if steps else None
    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

  supervisor = tf_supervisor.Supervisor(
      graph,
      init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
      init_feed_dict=init_feed_dict,
      is_chief=supervisor_is_chief,
      logdir=output_dir,
      saver=_make_saver(graph, keep_checkpoint_max),
      global_step=global_step_tensor,
      summary_op=None,
      summary_writer=summary_writer,
      save_model_secs=supervisor_save_model_secs,
      init_fn=init_fn)
  session = supervisor.PrepareSession(master=supervisor_master,
                                      start_standard_services=True)
  supervisor.StartQueueRunners(session)

  with session:
    get_current_step = lambda: session.run(global_step_tensor)

    start_step = get_current_step()
    last_step = start_step
    last_log_step = start_step
    loss_value = None
    logging.info('Training steps [%d,%s)', last_step, 'inf'
                 if max_steps is None else str(max_steps))

    excinfo = None
    try:
      while not supervisor.ShouldStop() and (
          (max_steps is None) or (last_step < max_steps)):
        start_time = time.time()
        feed_dict = feed_fn() if feed_fn is not None else None

        outputs, should_stop = _run_with_monitors(
            session, last_step + 1, [train_op, loss_op], feed_dict, monitors)

        loss_value = outputs[loss_op.name]
        if np.isnan(loss_value):
          failure_message = 'Model diverged with loss = NaN.'
          if fail_on_nan_loss:
            logging.error(failure_message)
            raise monitors_lib.NanLossDuringTrainingError()
          else:
            logging.warning(failure_message)

        if should_stop:
          break

        this_step = get_current_step()

        if this_step <= last_step:
          logging.error(
              'Global step was not incremented by train op at step %s'
              ': new step %d', last_step, this_step)

        last_step = this_step
        is_last_step = (max_steps is not None) and (last_step >= max_steps)
        if is_last_step or (last_step - last_log_step >= log_every_steps):
          logging.info(
              'training step %d, loss = %.5f (%.3f sec/batch).',
              last_step, loss_value, float(time.time() - start_time))
          last_log_step = last_step
    except errors.OutOfRangeError as e:
      logging.warn('Got exception during tf.learn training loop possibly '
                   'due to exhausted input queue %s.', e)
    except StopIteration:
      logging.info('Exhausted input iterator.')
    except BaseException as e:  # pylint: disable=broad-except
      # Hold on to any other exceptions while we try recording a final
      # checkpoint and summary.
      excinfo = sys.exc_info()
    finally:
      try:
        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop(close_summary_writer=False)

        # Save one last checkpoint and summaries
        # TODO(wicke): This should be handled by Supervisor

        # In case we encountered an exception in the try block before we updated
        # last_step, update it here (again).
        last_step = get_current_step()
        if supervisor_is_chief:
          ckpt_path = supervisor.save_path
          logging.info('Saving checkpoint for step %d to checkpoint: %s.',
                       last_step, ckpt_path)
          supervisor.saver.save(session, ckpt_path, global_step=last_step)

          # Finish monitors.
          for monitor in monitors:
            monitor.end()

      # catch OutOfRangeError which is thrown when queue is out of data (and for
      # other reasons as well).
      except errors.OutOfRangeError as e:
        logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
                     'due to exhausted input queue. Note: summary_op is not '
                     'expected to trigger dequeues. %s.', e)
      except BaseException as e:  # pylint: disable=broad-except
        # If we don't already have an exception to re-raise, raise this one.
        if not excinfo:
          raise
        # Otherwise, log this one and raise the other in the finally block.
        logging.error('Got exception during tf.learn final checkpoint %s.', e)
      finally:
        if excinfo:
          reraise(*excinfo)
    return loss_value
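
`_run_with_monitors` is defined outside this snippet; from the call above it runs the requested tensors plus any extra fetches the monitors request in `step_begin`, returns the results keyed by tensor name, and lets each monitor's `step_end` vote on stopping. A rough, illustrative stand-in under those assumptions (not the library's implementation; it assumes the tf.contrib.learn monitor interface with `step_begin(step)` and `step_end(step, outputs)`):

def run_with_monitors_sketch(session, step, tensors, feed_dict, monitors):
  """Illustrative stand-in: run `tensors` and consult monitors about stopping."""
  fetches = list(tensors)
  for monitor in monitors:
    fetches += monitor.step_begin(step)   # monitors may ask for extra fetches
  values = session.run(fetches, feed_dict=feed_dict)
  # Key results by tensor/op name, matching outputs[loss_op.name] above.
  outputs = {getattr(t, 'name', t): v for t, v in zip(fetches, values)}
  should_stop = False
  for monitor in monitors:
    should_stop = monitor.step_end(step, outputs) or should_stop
  return outputs, should_stop
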
Ejemplo n.º 39
0
 def get_variable_value(self, name):
   return checkpoints.load_variable(self.model_dir, name)
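
A hedged usage sketch for this accessor: fit a small `tf.contrib.learn` estimator and then read a checkpointed variable back by name. The estimator type, directory, and toy data are assumptions for illustration; the variable name follows the `linear/bias_weight` convention seen in a later example below.

import tensorflow as tf

def input_fn():
  # Four toy examples of y = 2x + 1 (illustrative data).
  x = tf.constant([[0.], [1.], [2.], [3.]])
  y = tf.constant([[1.], [3.], [5.], [7.]])
  return {'x': x}, y

est = tf.contrib.learn.LinearRegressor(
    feature_columns=[tf.contrib.layers.real_valued_column('x')],
    model_dir='/tmp/get_variable_value_example')  # hypothetical path
est.fit(input_fn=input_fn, steps=100)
print(est.get_variable_value('linear/bias_weight'))
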
Ejemplo n.º 40
0
 def testNoCheckpoints(self):
   checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
   with self.assertRaises(tf.errors.OpError):
     self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var1"), [])
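
For contrast with the failure case above, a hedged sketch of the success path: write a checkpoint with `tf.train.Saver`, then read a single variable back through `checkpoints.load_variable`. The directory and variable name are illustrative, and `checkpoints` is assumed to be the same module used by the other examples (its import path varies across TensorFlow versions).

import os
import tensorflow as tf

checkpoint_dir = '/tmp/load_variable_success_example'  # hypothetical path
if not os.path.exists(checkpoint_dir):
  os.makedirs(checkpoint_dir)

with tf.Graph().as_default():
  var1 = tf.Variable([1.0, 2.0, 3.0], name='var1')
  saver = tf.train.Saver()
  with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'))

value = checkpoints.load_variable(checkpoint_dir, 'var1')  # -> [1., 2., 3.]
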
Ejemplo n.º 41
0
def weights(model):
    return checkpoints.load_variable(model.model_dir, 'Variable')
Ejemplo n.º 42
0
 def bias_(self):
   return checkpoints.load_variable(self._model_dir,
                                    name="linear/bias_weight")
Ejemplo n.º 43
0
def _monitored_train(graph,
                     output_dir,
                     train_op,
                     loss_op,
                     global_step_tensor=None,
                     init_op=None,
                     init_feed_dict=None,
                     init_fn=None,
                     log_every_steps=10,
                     supervisor_is_chief=True,
                     supervisor_master='',
                     supervisor_save_model_secs=600,
                     keep_checkpoint_max=5,
                     supervisor_save_summaries_steps=100,
                     feed_fn=None,
                     steps=None,
                     fail_on_nan_loss=True,
                     hooks=None,
                     max_steps=None):
  """Train a model via monitored_session.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on
  the model; it is expected to increment the `global_step_tensor`, a scalar
  integer tensor counting training steps. The `loss_op` represents the
  objective function of the training. This function uses `Supervisor` to
  initialize the graph (from a checkpoint if one is available in `output_dir`),
  write summaries defined in the graph, and write regular checkpoints as
  defined by `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or,
  if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case a
  `NanLossDuringTrainingError` is raised.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss. A `0` or negative value disables logging.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save model every
      `supervisor_save_model_secs` seconds when training.
    keep_checkpoint_max: The maximum number of recent checkpoint files to
      keep. As new files are created, older files are deleted. If None or 0,
      all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to the `tf.Saver` constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    hooks: List of `SessionRunHook` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls to fit(steps=100) mean 200 training iterations.
      In contrast, with two calls to fit(max_steps=100) the second call
      performs no iterations, since the first call already did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
      is not provided. See `tf.contrib.framework.get_global_step` for how we
      look up the latter if not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError(
        'Output directory should be non-empty; got %r.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if hooks is None:
    hooks = []
  if not isinstance(hooks, list):
    raise ValueError('Hooks should be a list.')
  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = checkpoints.load_variable(output_dir,
                                             global_step_tensor.name)
      if max_steps <= start_step:
        logging.info('Skipping training since the saved global step already '
                     'reached max_steps.')
        return None
    except:  # pylint: disable=bare-except
      pass

  # Adapted SessionRunHooks such as ExportMonitor depend on the
  # CheckpointSaverHook being executed before they run. The `hooks` param
  # consists of deprecated monitor hooks (such as ExportMonitor), so they are
  # appended after the basic_session_run_hooks.
  all_hooks = []
  with graph.as_default():
    all_hooks.append(basic_session_run_hooks.NanTensorHook(
        loss_op, fail_on_nan_loss=fail_on_nan_loss))
    if log_every_steps > 0:
      all_hooks.append(basic_session_run_hooks.LoggingTensorHook({
          'loss': loss_op.name,
          'step': global_step_tensor.name
      }, every_n_iter=log_every_steps))

    def make_saver():
      return tf_saver.Saver(
          sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True)

    scaffold = monitored_session.Scaffold(
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        saver=monitored_session.Scaffold.get_or_default('saver',
                                                        ops.GraphKeys.SAVERS,
                                                        make_saver))

    if not supervisor_is_chief:
      session_creator = monitored_session.WorkerSessionCreator(
          scaffold=scaffold,
          master=supervisor_master)
    else:
      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=scaffold,
          checkpoint_dir=output_dir,
          master=supervisor_master)
      summary_writer = summary_writer_cache.SummaryWriterCache.get(output_dir)
      all_hooks.append(
          basic_session_run_hooks.StepCounterHook(
              summary_writer=summary_writer))
      all_hooks.append(
          basic_session_run_hooks.SummarySaverHook(
              save_steps=supervisor_save_summaries_steps,
              summary_writer=summary_writer,
              scaffold=scaffold))
      if supervisor_save_model_secs > 0:
        all_hooks.append(
            basic_session_run_hooks.CheckpointSaverHook(
                output_dir,
                save_secs=supervisor_save_model_secs,
                scaffold=scaffold))

    if steps is not None or max_steps is not None:
      all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    all_hooks.extend(hooks)

    with monitored_session.MonitoredSession(
        session_creator=session_creator,
        hooks=all_hooks) as super_sess:
      loss = None
      while not super_sess.should_stop():
        _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else
                                 None)
      return loss
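
The docstring above spells out the contract `_monitored_train` expects: a `train_op` that increments the global step, a scalar `loss_op`, and an output directory for checkpoints and summaries. A hedged, minimal usage sketch under those assumptions follows; in practice this private helper is normally reached through an estimator's `fit` rather than called directly, and the directory, learning rate, and step count below are illustrative.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  global_step = tf.contrib.framework.create_global_step()
  w = tf.Variable(5.0)
  loss_op = tf.square(w - 3.0)     # toy objective, minimized at w == 3
  # minimize() both applies the update and increments the global step.
  train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
      loss_op, global_step=global_step)

final_loss = _monitored_train(
    g,
    output_dir='/tmp/monitored_train_sketch',  # hypothetical path
    train_op=train_op,
    loss_op=loss_op,
    max_steps=50)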