def add_annotation(self, name_scope='annotation', enable_tracing=False):
    """Adds an annotation pipeline to the graph.

    This will create the following additional named targets by default, for use
    in C++ annotation code (as well as regular ComputeSession targets):
      annotation/ComputeSession/session_id (placeholder for giving unique id)
      annotation/EmitAnnotations (get annotated data)
      annotation/GetComponentTrace (get trace data)
      annotation/SetTracing (sets tracing based on annotation/tracing_on)

    Args:
      name_scope: Scope for the annotation pipeline.
      enable_tracing: Enabling this will result in two things:
          1. Tracing will be enabled during inference.
          2. A 'traces' node will be added to the outputs.

    Returns:
      A dictionary of input and output nodes.
    """
    with tf.name_scope(name_scope):
        handle, input_batch = self._get_session_with_reader(enable_tracing)
        handle = self.build_inference(handle, use_moving_average=True)

        # Annotations (and traces) are emitted from the final component.
        last_component = self.spec.component[-1].name
        result = {
            'annotations': dragnn_ops.emit_annotations(
                handle, component=last_component),
        }
        if enable_tracing:
            result['traces'] = dragnn_ops.get_component_trace(
                handle, component=last_component)

        return self._outputs_with_release(
            handle, {'input_batch': input_batch}, result)
# Example 2
  def add_annotation(self, name_scope='annotation', enable_tracing=False):
    """Adds an annotation pipeline to the graph.

    This will create the following additional named targets by default, for use
    in C++ annotation code (as well as regular ComputeSession targets):
      annotation/ComputeSession/session_id (placeholder for giving unique id)
      annotation/EmitAnnotations (get annotated data)
      annotation/GetComponentTrace (get trace data)
      annotation/SetTracing (sets tracing based on annotation/tracing_on)

    Args:
      name_scope: Scope for the annotation pipeline.
      enable_tracing: Enabling this will result in two things:
          1. Tracing will be enabled during inference.
          2. A 'traces' node will be added to the outputs.

    Returns:
      A dictionary of input and output nodes.
    """
    with tf.name_scope(name_scope):
      handle, input_batch = self._get_session_with_reader(enable_tracing)
      handle = self.build_inference(handle, use_moving_average=True)

      # The final component is the one whose annotations (and, optionally,
      # traces) are exposed as graph outputs.
      final_component = self.spec.component[-1].name
      outputs = {}
      outputs['annotations'] = dragnn_ops.emit_annotations(
          handle, component=final_component)
      if enable_tracing:
        outputs['traces'] = dragnn_ops.get_component_trace(
            handle, component=final_component)

      return self._outputs_with_release(
          handle, {'input_batch': input_batch}, outputs)
# Example 3
  def add_training_from_config(self,
                               target_config,
                               prefix='train-',
                               trace_only=False,
                               **kwargs):
    """Constructs a training pipeline from a TrainTarget proto.

    This constructs a separately managed pipeline for a given target:
    it has its own ComputeSession, InputSpec placeholder, etc. The ops
    are given standardized names to allow access from the C++ API. It
    passes the values in target_config to build_training() above.

    For the default prefix ('train-'), and a target named 'target', this will
    construct the following targets in the graph:

      train-target/ComputeSession/* (the standard ComputeSession controls)
      train-target/run (handle to a completed training step)
      train-target/metrics (per-decision metrics from gold oracles)
      train-target/cost (total cost across all components)

    Enabling `trace_only` effectively creates a graph that is a 'dry run'.
    There will be no side effects. In addition, the gradients won't be computed
    and the model parameters will not be updated.

    Args:
      target_config: the TrainTarget proto.
      prefix: Prepends target_config.name with this to construct
        a unique identifier.
      trace_only: Enabling this will result in:
          1. Tracing will be enabled for the ComputeSession.
          2. A 'traces' node will be added to the outputs.
          3. Gradients will not be computed.
      **kwargs: Passed on to build_training() above.

    Returns:
      Dictionary of training targets.
    """
    logging.info('Creating new training target '
                 '%s'
                 ' from config: %s', target_config.name, str(target_config))
    scope_id = prefix + target_config.name
    with tf.name_scope(scope_id):
      # Construct training targets. Disable tracing during training.
      # (The flag passed here is trace_only, so tracing is on only for
      # dry runs.)
      handle, input_batch = self._get_session_with_reader(trace_only)

      # If `trace_only` is True, the training graph shouldn't have any
      # side effects. Otherwise, the standard training scenario should
      # generate gradients and update counters.
      handle, outputs = self.build_training(
          handle,
          compute_gradients=not trace_only,
          advance_counters=not trace_only,
          component_weights=target_config.component_weights,
          unroll_using_oracle=target_config.unroll_using_oracle,
          max_index=target_config.max_index,
          **kwargs)
      if trace_only:
        # Expose traces from the final component as an output node.
        outputs['traces'] = dragnn_ops.get_component_trace(
            handle, component=self.spec.component[-1].name)
      else:
        # Standard training keeps track of the number of training steps.
        # Note: tf.get_variable is scoped by variable_scope, not name_scope,
        # hence the explicit scope_id prefix in the variable name.
        outputs['target_step'] = tf.get_variable(
            scope_id + '/TargetStep', [],
            initializer=tf.zeros_initializer(),
            dtype=tf.int32)
        increment_target_step = tf.assign_add(
            outputs['target_step'], 1, use_locking=True)

        # Tie the counter increment to the returned handle so it fires
        # whenever the training step is run.
        with tf.control_dependencies([increment_target_step]):
          handle = tf.identity(handle)

      return self._outputs_with_release(handle, {'input_batch': input_batch},
                                        outputs)
# Example 4
    def add_training_from_config(self,
                                 target_config,
                                 prefix='train-',
                                 trace_only=False,
                                 **kwargs):
        """Builds a self-contained training pipeline for one TrainTarget.

    The pipeline gets its own ComputeSession, InputSpec placeholder, etc.,
    and its ops receive standardized names so they can be driven from the
    C++ API. The values in target_config are forwarded to build_training()
    above.

    With the default prefix ('train-') and a target named 'target', the
    following graph targets are created:

      train-target/ComputeSession/* (the standard ComputeSession controls)
      train-target/run (handle to a completed training step)
      train-target/metrics (per-decision metrics from gold oracles)
      train-target/cost (total cost across all components)

    Setting `trace_only` effectively makes the graph a 'dry run': there are
    no side effects, gradients are not computed, and model parameters are
    not updated.

    Args:
      target_config: the TrainTarget proto.
      prefix: Prepended to target_config.name to construct a unique
        identifier.
      trace_only: Enabling this will result in:
          1. Tracing will be enabled for the ComputeSession.
          2. A 'traces' node will be added to the outputs.
          3. Gradients will not be computed.
      **kwargs: Passed on to build_training() above.

    Returns:
      Dictionary of training targets.
    """
        logging.info('Creating new training target %s from config: %s',
                     target_config.name, str(target_config))
        target_name = prefix + target_config.name
        with tf.name_scope(target_name):
            # Build the session and input reader; tracing is enabled only
            # for dry runs.
            handle, input_batch = self._get_session_with_reader(trace_only)

            # A dry run must be free of side effects, so both gradient
            # computation and counter updates are disabled when trace_only
            # is set.
            handle, outputs = self.build_training(
                handle,
                compute_gradients=not trace_only,
                advance_counters=not trace_only,
                component_weights=target_config.component_weights,
                unroll_using_oracle=target_config.unroll_using_oracle,
                max_index=target_config.max_index,
                **kwargs)

            if trace_only:
                # Expose traces from the final component.
                outputs['traces'] = dragnn_ops.get_component_trace(
                    handle, component=self.spec.component[-1].name)
            else:
                # Real training tracks how many steps this target has run.
                outputs['target_step'] = tf.get_variable(
                    target_name + '/TargetStep', [],
                    initializer=tf.zeros_initializer(),
                    dtype=tf.int32)
                bump_step = tf.assign_add(
                    outputs['target_step'], 1, use_locking=True)

                # Tie the counter increment to the returned handle so it
                # fires whenever the training step runs.
                with tf.control_dependencies([bump_step]):
                    handle = tf.identity(handle)

            return self._outputs_with_release(
                handle, {'input_batch': input_batch}, outputs)