Example #1
  def _outputs_with_release(self, handle, inputs, outputs):
    """Ensures ComputeSession is released before outputs are returned.

    Args:
      handle: Handle to ComputeSession on which all computation until now has
          depended. It will be released and assigned to the output 'run'.
      inputs: dict of named nodes we want to pass through without adding any
          dependencies.
      outputs: dict of named nodes whose access should ensure the ComputeSession
          is safely released.

    Returns:
      A dictionary of both input and output nodes.
    """
    with tf.control_dependencies(outputs.values()):
      with tf.name_scope('ComputeSession'):
        release_op = dragnn_ops.release_session(handle)
      run_op = tf.group(release_op, name='run')
      for output in outputs:
        with tf.control_dependencies([release_op]):
          outputs[output] = tf.identity(outputs[output], name=output)
    all_nodes = inputs.copy()
    all_nodes.update(outputs)

    # Add an alias for simply running without collecting outputs.
    # Common, for instance, with training.
    all_nodes['run'] = run_op
    return all_nodes
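
The ordering trick above can be seen in isolation with plain TensorFlow ops. The sketch below is not DRAGNN code: the session release is replaced by a hypothetical counter increment so the ordering is observable, and it assumes the TF 1.x graph-mode API (tensorflow.compat.v1). Fetching any wrapped output, or the 'run' alias, forces the stand-in release op to run first.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Stand-in for the ComputeSession release: a counter we can inspect afterwards.
counter = tf.get_variable('release_count', shape=[], dtype=tf.int32,
                          initializer=tf.zeros_initializer())
outputs = {'logits': tf.constant([0.1, 0.9])}

with tf.control_dependencies(list(outputs.values())):
  release_op = tf.assign_add(counter, 1)   # plays the role of release_session
  run_op = tf.group(release_op, name='run')
  for name in outputs:
    with tf.control_dependencies([release_op]):
      outputs[name] = tf.identity(outputs[name], name=name)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(outputs['logits'])   # fetching an output triggers the release first
  sess.run(run_op)              # the side-effect-only alias also triggers it
  print(sess.run(counter))      # -> 2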
Example #2
    def _outputs_with_release(self, handle, inputs, outputs):
        """Ensures ComputeSession is released before outputs are returned.

        Args:
          handle: Handle to ComputeSession on which all computation until now has
              depended. It will be released and assigned to the output 'run'.
          inputs: dict of named nodes we want to pass through without adding any
              dependencies.
          outputs: dict of named nodes whose access should ensure the
              ComputeSession is safely released.

        Returns:
          A dictionary of both input and output nodes.
        """
        with tf.control_dependencies(outputs.values()):
            with tf.name_scope('ComputeSession'):
                release_op = dragnn_ops.release_session(handle)
            run_op = tf.group(release_op, name='run')
            for output in outputs:
                with tf.control_dependencies([release_op]):
                    outputs[output] = tf.identity(outputs[output], name=output)
        all_nodes = inputs.copy()
        all_nodes.update(outputs)

        # Add an alias for simply running without collecting outputs.
        # Common, for instance, with training.
        all_nodes['run'] = run_op
        return all_nodes
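
One detail worth noting in both copies: the name= passed to tf.identity is what gives each output a stable, top-level name in the exported graph, so serving code can later look the node up by name. A minimal, non-DRAGNN sketch of that effect (stand-in ops, TF 1.x API, names are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

raw = tf.constant([0.2, 0.8], name='component/internal_scores')
release_op = tf.no_op(name='release')   # stand-in for release_session
with tf.control_dependencies([release_op]):
  tf.identity(raw, name='annotations')  # exported under a stable top-level name

graph = tf.get_default_graph()
with tf.Session(graph=graph) as sess:
  # A caller that only knows the exported name can still fetch the output,
  # and the control dependency guarantees the release op runs first.
  print(sess.run(graph.get_tensor_by_name('annotations:0')))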
Example #3
  def build_warmup_graph(self, asset_dir):
    """Builds a warmup graph.

    This graph performs a MasterSpec asset location rewrite via
    SetAssetDirectory, then grabs a ComputeSession and immediately releases it.
    By grabbing a session, we cause the underlying transition systems to cache
    their static data reads.

    Args:
      asset_dir: The base directory to append to all resources.

    Returns:
      A single op suitable for passing to the legacy_init_op of the ModelSaver.
    """
    with tf.control_dependencies([dragnn_ops.set_asset_directory(asset_dir)]):
      session = self._get_compute_session()
      release_op = dragnn_ops.release_session(session)
    return tf.group(release_op, name='run')
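
A sketch of how the returned op would typically be wired into a TF 1.x SavedModel export. None of this is DRAGNN code: the warmup op and dummy variable are stand-ins, the export path is hypothetical, and the legacy_init_op argument (run once when the SavedModel is loaded) is what would trigger the warmup at serving time.

import os
import tempfile

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Stand-ins: a dummy variable so the SavedModel has something to save, and a
# no-op grouped under 'run' in place of the real warmup op.
_ = tf.get_variable('dummy', shape=[], initializer=tf.zeros_initializer())
warmup_op = tf.group(tf.no_op(), name='run')

export_dir = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
  builder.add_meta_graph_and_variables(
      sess, [tf.saved_model.tag_constants.SERVING],
      # Runs once at load time; with the real op this caches static resources.
      legacy_init_op=warmup_op)
  builder.save()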