Example #1
    def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
      """Preprocesses Places-365 image Tensors using inception_preprocessing."""
      # `preprocess_image` returns images in [-1, 1].
      image = inception_preprocessing.preprocess_image(
          example['image'],
          height=224,
          width=224,
          is_training=self._is_training)
      # Rescale to [0, 1].
      image = (image + 1.0) / 2.0

      label = tf.cast(example['label'], tf.int32)
      return {'features': image, 'labels': label}
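
A parser like the one above is meant to be mapped over a tf.data pipeline. Below is a minimal, self-contained sketch of that usage; the `places365_small` TFDS builder name, the batch size, and the plain `tf.image` ops standing in for `inception_preprocessing` are illustrative assumptions, not the original class's code.

    import tensorflow as tf
    import tensorflow_datasets as tfds

    def _standalone_parser(example):
      """Illustrative stand-in for the class-bound parser above."""
      image = tf.image.resize(example['image'], [224, 224])
      image = tf.cast(image, tf.float32) / 255.0  # Rescale to [0, 1].
      label = tf.cast(example['label'], tf.int32)
      return {'features': image, 'labels': label}

    dataset = tfds.load('places365_small', split='train')
    dataset = (dataset
               .map(_standalone_parser, num_parallel_calls=tf.data.AUTOTUNE)
               .batch(128)
               .prefetch(tf.data.AUTOTUNE))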
Example #2
        def _example_parser(
                example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
            """Preprocesses ImageNet image Tensors."""
            # Fold the example's enumeration index into the base seed to get a
            # deterministic per-example seed.
            per_example_step_seed = tf.random.experimental.stateless_fold_in(
                self._seed, example[self._enumerate_id_key])
            if self._preprocessing_type == 'inception':
                # `inception_preprocessing.preprocess_image` returns images in [-1, 1].
                image = inception_preprocessing.preprocess_image(
                    example['image'],
                    height=self._image_size,
                    width=self._image_size,
                    seed=per_example_step_seed,
                    is_training=self._is_training)
                # Rescale to [0, 1].
                image = (image + 1.0) / 2.0
            elif self._preprocessing_type == 'resnet':
                # `resnet_preprocessing.preprocess_image` returns images in [0, 1].
                image = resnet_preprocessing.preprocess_image(
                    image_bytes=example['image'],
                    is_training=self._is_training,
                    use_bfloat16=self._use_bfloat16,
                    image_size=self._image_size,
                    seed=per_example_step_seed,
                    resize_method=self._resnet_preprocessing_resize_method)
            else:
                raise ValueError(
                    'Invalid preprocessing type, must be one of "inception" or '
                    '"resnet", received {}.'.format(self._preprocessing_type))

            if self._normalize_input:
                image = (tf.cast(image, tf.float32) -
                         IMAGENET_MEAN) / IMAGENET_STDDEV
            if self._use_bfloat16:
                image = tf.cast(image, tf.bfloat16)

            # Note that labels are always float32, even when images are bfloat16.
            if self._one_hot:
                label = tf.one_hot(example['label'], 1000, dtype=tf.float32)
            else:
                label = tf.cast(example['label'], tf.float32)
            parsed_example = {
                'features': image,
                'labels': label,
            }
            if 'file_name' in example:
                parsed_example['file_name'] = example['file_name']
            return parsed_example
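
The `stateless_fold_in` call above derives a distinct but reproducible seed for every example from one base seed and the example's enumeration index. A small self-contained sketch of that pattern follows; the variable names and the flip augmentation are illustrative assumptions, not the original class's code.

    import tensorflow as tf

    base_seed = tf.constant([42, 0], dtype=tf.int64)

    def _augment(enumerate_id, example):
      # Fold the enumeration index into the base seed so each example gets
      # its own deterministic seed.
      per_example_seed = tf.random.experimental.stateless_fold_in(
          base_seed, enumerate_id)
      image = tf.image.stateless_random_flip_left_right(
          example['image'], seed=per_example_seed)
      return dict(example, image=image)

    dataset = tf.data.Dataset.from_tensor_slices(
        {'image': tf.zeros([4, 32, 32, 3])})
    dataset = dataset.enumerate().map(_augment)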
Example #3
        def _example_parser(
                example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
            """Preprocesses Places-365 image Tensors using inception_preprocessing."""
            per_example_step_seed = tf.random.experimental.stateless_fold_in(
                self._seed, example[self._enumerate_id_key])
            # `preprocess_image` returns images in [-1, 1].
            image = inception_preprocessing.preprocess_image(
                example['image'],
                height=224,
                width=224,
                seed=per_example_step_seed,
                is_training=self._is_training)
            # Rescale to [0, 1].
            image = (image + 1.0) / 2.0

            label = tf.cast(example['label'], tf.int32)
            return {'features': image, 'labels': label}
Example #4
    def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
      """Preprocesses ImageNet image Tensors using inception_preprocessing."""
      # `preprocess_image` returns images in [-1, 1].
      image = inception_preprocessing.preprocess_image(
          example['image'],
          height=224,
          width=224,
          is_training=self._is_training(split))
      # Rescale to [0, 1].
      image = (image + 1.0) / 2.0

      label = tf.cast(example['label'], tf.int32)
      parsed_example = {
          'features': image,
          'labels': label,
      }
      if 'file_name' in example:
        parsed_example['file_name'] = example['file_name']
      return parsed_example