def graph_fn():
      """Decodes a serialized sequence example with four annotated frames.

      Relies on closure variables (`self`, `encoded_images`, `image_height`,
      `image_width`, `num_frames`) from the enclosing test method.
      """
      # Serialize the example first; the label map file only has to exist
      # before the decoder is constructed.
      serialized = seq_example_util.make_sequence_example(
          dataset_name='video_dataset',
          video_id='video',
          encoded_images=encoded_images,
          image_height=image_height,
          image_width=image_width,
          image_format='JPEG',
          image_source_ids=[str(i) for i in range(num_frames)],
          is_annotated=[[1], [1], [1], [1]],
          bboxes=[
              [[0., 0., 1., 1.]],  # Frame 0.
              [[0.2, 0.2, 1., 1.],
               [0., 0., 1., 1.]],  # Frame 1.
              [[0., 0., 1., 1.],  # Frame 2.
               [0.1, 0.1, 0.2, 0.2]],
              [[]],  # Frame 3: no boxes.
          ],
          label_strings=[
              ['fox'],  # Frame 0. Fox will be filtered out.
              ['fox', 'dog'],  # Frame 1. Fox will be filtered out.
              ['dog', 'cat'],  # Frame 2.
              [],  # Frame 3: no labels.
          ]).SerializeToString()

      label_map_path = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
      self._create_label_map(label_map_path)
      decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
          label_map_proto_file=label_map_path)
      return decoder.decode(tf.convert_to_tensor(serialized))
    def graph_fn():
      """Decodes a serialized sequence example whose frames are unannotated.

      Relies on closure variables (`self`, `encoded_images`, `image_height`,
      `image_width`, `num_frames`) from the enclosing test method.
      """
      # Build the decoder up front; it needs the label map file on disk.
      label_map_path = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
      self._create_label_map(label_map_path)
      decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
          label_map_proto_file=label_map_path)

      # Four frames, each with an empty box list and no label strings.
      serialized = seq_example_util.make_sequence_example(
          dataset_name='video_dataset',
          video_id='video',
          encoded_images=encoded_images,
          image_height=image_height,
          image_width=image_width,
          image_format='JPEG',
          image_source_ids=[str(i) for i in range(num_frames)],
          bboxes=[[[]], [[]], [[]], [[]]],
          label_strings=[[], [], [], []]).SerializeToString()

      return decoder.decode(tf.convert_to_tensor(serialized))
def build(input_reader_config):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
      input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
      A tensor dict based on the input_reader_config.

    Raises:
      ValueError: On invalid input reader proto.
      ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')

    # Guard clause: only the TFRecord reader is supported.
    if input_reader_config.WhichOneof(
            'input_reader') != 'tf_record_input_reader':
        raise ValueError('Unsupported input_reader_config.')

    reader_config = input_reader_config.tf_record_input_reader
    if not reader_config.input_path:
        raise ValueError('At least one input path must be specified in '
                         '`input_reader_config`.')

    # `num_epochs == 0` means "unlimited" to parallel_read, expressed as None.
    _, string_tensor = parallel_reader.parallel_read(
        reader_config.input_path[:],  # Convert `RepeatedScalarContainer` to list.
        reader_class=tf.TFRecordReader,
        num_epochs=input_reader_config.num_epochs or None,
        num_readers=input_reader_config.num_readers,
        shuffle=input_reader_config.shuffle,
        dtypes=[tf.string, tf.string],
        capacity=input_reader_config.queue_capacity,
        min_after_dequeue=input_reader_config.min_after_dequeue)

    label_map_proto_file = (
        input_reader_config.label_map_path
        if input_reader_config.HasField('label_map_path') else None)

    input_type = input_reader_config.input_type
    if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
        decoder = tf_example_decoder.TfExampleDecoder(
            load_instance_masks=input_reader_config.load_instance_masks,
            instance_mask_type=input_reader_config.mask_type,
            label_map_proto_file=label_map_proto_file,
            load_context_features=input_reader_config.load_context_features)
        return decoder.decode(string_tensor)
    if input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
        decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
            label_map_proto_file=label_map_proto_file,
            load_context_features=input_reader_config.load_context_features)
        return decoder.decode(string_tensor)
    raise ValueError('Unsupported input_type.')
# Example 4
def build(input_reader_config):
    """Builds a DataDecoder based only on the open source config proto.

    Args:
      input_reader_config: An input_reader_pb2.InputReader object.

    Returns:
      A DataDecoder based on the input_reader_config.

    Raises:
      ValueError: On invalid input reader proto.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')

    # Guard clause: only the TFRecord reader is supported.
    if input_reader_config.WhichOneof(
            'input_reader') != 'tf_record_input_reader':
        raise ValueError('Unsupported input_reader_config.')

    label_map_proto_file = (
        input_reader_config.label_map_path
        if input_reader_config.HasField('label_map_path') else None)

    input_type = input_reader_config.input_type
    if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
        # The decoder itself is returned; decoding happens at the call site.
        return tf_example_decoder.TfExampleDecoder(
            load_instance_masks=input_reader_config.load_instance_masks,
            load_multiclass_scores=(
                input_reader_config.load_multiclass_scores),
            load_context_features=(
                input_reader_config.load_context_features),
            instance_mask_type=input_reader_config.mask_type,
            label_map_proto_file=label_map_proto_file,
            use_display_name=input_reader_config.use_display_name,
            num_additional_channels=(
                input_reader_config.num_additional_channels),
            num_keypoints=input_reader_config.num_keypoints,
            expand_hierarchy_labels=(
                input_reader_config.expand_labels_hierarchy),
            load_dense_pose=input_reader_config.load_dense_pose,
            load_track_id=input_reader_config.load_track_id)
    if input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
        return tf_sequence_example_decoder.TfSequenceExampleDecoder(
            label_map_proto_file=label_map_proto_file,
            load_context_features=(
                input_reader_config.load_context_features),
            load_context_image_ids=(
                input_reader_config.load_context_image_ids))
    raise ValueError('Unsupported input_type in config.')