Example #1
    def build_inputs(self, params, input_context=None):
        """Builds classification input."""

        num_classes = self.task_config.model.num_classes
        input_size = self.task_config.model.input_size

        if params.tfds_name:
            if params.tfds_name in tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP:
                decoder = tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP[
                    params.tfds_name]()
            else:
                raise ValueError('TFDS {} is not supported'.format(
                    params.tfds_name))
        else:
            decoder = classification_input.Decoder()

        parser = classification_input.Parser(
            output_size=input_size[:2],
            num_classes=num_classes,
            aug_policy=params.aug_policy,
            randaug_magnitude=params.randaug_magnitude,
            dtype=params.dtype)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
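Note: dataset_fn.pick_dataset_fn(params.file_type) appears in most of these examples but its body is not shown. A minimal sketch of the kind of dispatch it presumably performs, assuming 'tfrecord' is the only file type handled here (the real helper may support more formats):

import tensorflow as tf

def pick_dataset_fn(file_type: str):
    """Hypothetical sketch: map a file_type string to a tf.data source dataset class."""
    if file_type == 'tfrecord':
        # TFRecordDataset reads serialized tf.train.Example records from files.
        return tf.data.TFRecordDataset
    raise ValueError('Unsupported file_type: {}'.format(file_type))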
Example #2
    def build_inputs(
            self,
            params: cfg.DataConfig,
            input_context: Optional[tf.distribute.InputContext] = None):
        """Builds classification input."""
        ignore_label = self.task_config.losses.ignore_label

        if params.tfds_name:
            decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
        else:
            decoder = segmentation_input.Decoder()

        parser = ClassMappingParser(
            output_size=params.output_size,
            crop_size=params.crop_size,
            ignore_label=ignore_label,
            resize_eval_groundtruth=params.resize_eval_groundtruth,
            groundtruth_padded_size=params.groundtruth_padded_size,
            aug_scale_min=params.aug_scale_min,
            aug_scale_max=params.aug_scale_max,
            aug_rand_hflip=params.aug_rand_hflip,
            dtype=params.dtype)

        parser.max_class = self.task_config.model.num_classes - 1

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
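The input_context argument in these signatures is supplied by tf.distribute when each replica builds its own input pipeline. A minimal, self-contained sketch of how a strategy hands an InputContext to a dataset-building function (a toy dataset stands in for task.build_inputs; everything outside tf.* is an assumption):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

def dataset_fn(input_context: tf.distribute.InputContext) -> tf.data.Dataset:
    # In the examples above this body would simply be:
    #   return task.build_inputs(params, input_context=input_context)
    per_replica_batch = input_context.get_per_replica_batch_size(64)
    ds = tf.data.Dataset.range(1024)
    ds = ds.shard(input_context.num_input_pipelines,
                  input_context.input_pipeline_id)
    return ds.batch(per_replica_batch)

dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)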
Example #3
    def build_inputs(
        self,
        params: exp_cfg.ExampleDataConfig,
        input_context: Optional[tf.distribute.InputContext] = None
    ) -> tf.data.Dataset:
        """Builds input.

    The input from this function is a tf.data.Dataset that has gone through
    pre-processing steps, such as augmentation, batching, shuffuling, etc.

    Args:
      params: The experiment config.
      input_context: An optional InputContext used by input reader.

    Returns:
      A tf.data.Dataset object.
    """

        num_classes = self.task_config.model.num_classes
        input_size = self.task_config.model.input_size
        decoder = example_input.Decoder()

        parser = example_input.Parser(output_size=input_size[:2],
                                      num_classes=num_classes)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
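As the docstring above notes, the returned object is a batched, preprocessed tf.data.Dataset. A short sketch of pulling one batch from it for inspection (the (images, labels) element structure is an assumption; the exact structure depends on the parser):

# 'dataset' is the tf.data.Dataset returned by build_inputs above.
for images, labels in dataset.take(1):
    # One batch of preprocessed images and the corresponding labels.
    print(images.shape, labels.shape)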
Example #4
    def build_inputs(self, params, input_context=None):
        """Builds classification input."""

        ignore_label = self.task_config.losses.ignore_label

        if params.tfds_name:
            if params.tfds_name in tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP:
                decoder = tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP[
                    params.tfds_name]()
            else:
                raise ValueError('TFDS {} is not supported'.format(
                    params.tfds_name))
        else:
            decoder = segmentation_input.Decoder()

        parser = segmentation_input.Parser(
            output_size=params.output_size,
            train_on_crops=params.train_on_crops,
            ignore_label=ignore_label,
            resize_eval_groundtruth=params.resize_eval_groundtruth,
            groundtruth_padded_size=params.groundtruth_padded_size,
            aug_scale_min=params.aug_scale_min,
            aug_scale_max=params.aug_scale_max,
            aug_rand_hflip=params.aug_rand_hflip,
            dtype=params.dtype)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
Example #5
    def build_inputs(
        self,
        params: exp_cfg.DataConfig,
        input_context: Optional[tf.distribute.InputContext] = None
    ) -> tf.data.Dataset:
        """Builds classification input."""

        num_classes = self.task_config.model.num_classes
        input_size = self.task_config.model.input_size
        image_field_key = self.task_config.train_data.image_field_key
        label_field_key = self.task_config.train_data.label_field_key
        is_multilabel = self.task_config.train_data.is_multilabel

        if params.tfds_name:
            decoder = tfds_factory.get_classification_decoder(params.tfds_name)
        else:
            decoder = classification_input.Decoder(
                image_field_key=image_field_key,
                label_field_key=label_field_key,
                is_multilabel=is_multilabel)

        parser = classification_input.Parser(
            output_size=input_size[:2],
            num_classes=num_classes,
            image_field_key=image_field_key,
            label_field_key=label_field_key,
            decode_jpeg_only=params.decode_jpeg_only,
            aug_rand_hflip=params.aug_rand_hflip,
            aug_type=params.aug_type,
            color_jitter=params.color_jitter,
            random_erasing=params.random_erasing,
            is_multilabel=is_multilabel,
            dtype=params.dtype)

        postprocess_fn = None
        if params.mixup_and_cutmix:
            postprocess_fn = augment.MixupAndCutmix(
                mixup_alpha=params.mixup_and_cutmix.mixup_alpha,
                cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha,
                prob=params.mixup_and_cutmix.prob,
                label_smoothing=params.mixup_and_cutmix.label_smoothing,
                num_classes=num_classes)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training),
            postprocess_fn=postprocess_fn)

        dataset = reader.read(input_context=input_context)

        return dataset
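This variant adds a batch-level postprocess_fn. augment.MixupAndCutmix comes from the example itself, but any callable applied to a batched element can be plugged in. A hypothetical sketch of a custom post-batch step (the (images, labels) call signature is an assumption):

import tensorflow as tf

def scale_images_postprocess(images: tf.Tensor, labels: tf.Tensor):
    """Hypothetical post-batch hook: rescale images to [0, 1] after batching."""
    images = tf.cast(images, tf.float32) / 255.0
    return images, labels

# Could be passed as postprocess_fn=scale_images_postprocess in place of
# the MixupAndCutmix instance above.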
Example #6
    def build_inputs(
        self,
        params: exp_cfg.DataConfig,
        input_context: Optional[tf.distribute.InputContext] = None
    ) -> tf.data.Dataset:
        """Build input dataset."""
        decoder_cfg = params.decoder.get()
        if params.decoder.type == 'simple_decoder':
            decoder = panoptic_maskrcnn_input.TfExampleDecoder(
                regenerate_source_id=decoder_cfg.regenerate_source_id,
                mask_binarize_threshold=decoder_cfg.mask_binarize_threshold,
                include_panoptic_masks=decoder_cfg.include_panoptic_masks)
        else:
            raise ValueError('Unknown decoder type: {}!'.format(
                params.decoder.type))

        parser = panoptic_maskrcnn_input.Parser(
            output_size=self.task_config.model.input_size[:2],
            min_level=self.task_config.model.min_level,
            max_level=self.task_config.model.max_level,
            num_scales=self.task_config.model.anchor.num_scales,
            aspect_ratios=self.task_config.model.anchor.aspect_ratios,
            anchor_size=self.task_config.model.anchor.anchor_size,
            dtype=params.dtype,
            rpn_match_threshold=params.parser.rpn_match_threshold,
            rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
            rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
            rpn_fg_fraction=params.parser.rpn_fg_fraction,
            aug_rand_hflip=params.parser.aug_rand_hflip,
            aug_scale_min=params.parser.aug_scale_min,
            aug_scale_max=params.parser.aug_scale_max,
            skip_crowd_during_training=params.parser.
            skip_crowd_during_training,
            max_num_instances=params.parser.max_num_instances,
            mask_crop_size=params.parser.mask_crop_size,
            segmentation_resize_eval_groundtruth=params.parser.
            segmentation_resize_eval_groundtruth,
            segmentation_groundtruth_padded_size=params.parser.
            segmentation_groundtruth_padded_size,
            segmentation_ignore_label=params.parser.segmentation_ignore_label,
            panoptic_ignore_label=params.parser.panoptic_ignore_label,
            include_panoptic_masks=params.parser.include_panoptic_masks)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))
        dataset = reader.read(input_context=input_context)

        return dataset
Example #7
    def build_inputs(
            self,
            params: exp_cfg.DataConfig,
            input_context: Optional[tf.distribute.InputContext] = None):
        """Build input dataset."""

        if params.tfds_name:
            if params.tfds_name in tfds_detection_decoders.TFDS_ID_TO_DECODER_MAP:
                decoder = tfds_detection_decoders.TFDS_ID_TO_DECODER_MAP[
                    params.tfds_name]()
            else:
                raise ValueError('TFDS {} is not supported'.format(
                    params.tfds_name))
        else:
            decoder_cfg = params.decoder.get()
            if params.decoder.type == 'simple_decoder':
                decoder = tf_example_decoder.TfExampleDecoder(
                    regenerate_source_id=decoder_cfg.regenerate_source_id)
            elif params.decoder.type == 'label_map_decoder':
                decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
                    label_map=decoder_cfg.label_map,
                    regenerate_source_id=decoder_cfg.regenerate_source_id)
            else:
                raise ValueError('Unknown decoder type: {}!'.format(
                    params.decoder.type))

        parser = retinanet_input.Parser(
            output_size=self.task_config.model.input_size[:2],
            min_level=self.task_config.model.min_level,
            max_level=self.task_config.model.max_level,
            num_scales=self.task_config.model.anchor.num_scales,
            aspect_ratios=self.task_config.model.anchor.aspect_ratios,
            anchor_size=self.task_config.model.anchor.anchor_size,
            dtype=params.dtype,
            match_threshold=params.parser.match_threshold,
            unmatched_threshold=params.parser.unmatched_threshold,
            aug_rand_hflip=params.parser.aug_rand_hflip,
            aug_scale_min=params.parser.aug_scale_min,
            aug_scale_max=params.parser.aug_scale_max,
            skip_crowd_during_training=params.parser.
            skip_crowd_during_training,
            max_num_instances=params.parser.max_num_instances)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))
        dataset = reader.read(input_context=input_context)

        return dataset
Example #8
  def build_inputs(self, params: exp_cfg.DataConfig, input_context=None):
    """Builds classification input."""

    parser = video_input.Parser(input_params=params)
    postprocess_fn = video_input.PostBatchProcessor(params)

    reader = input_reader_factory.input_reader_generator(
        params,
        dataset_fn=self._get_dataset_fn(params),
        decoder_fn=self._get_decoder_fn(params),
        parser_fn=parser.parse_fn(params.is_training),
        postprocess_fn=postprocess_fn)

    dataset = reader.read(input_context=input_context)

    return dataset
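input_reader_generator wires the decoder, parser, and optional post-batch step together. A rough, simplified sketch of the pipeline order the reader implements (decode per record, parse per example, batch, then the optional post-batch map), written in plain tf.data as an approximation rather than the actual reader implementation:

import tensorflow as tf

def sketch_input_pipeline(file_pattern, decoder_fn, parser_fn,
                          postprocess_fn=None, batch_size=8):
    """Approximate order of operations inside the input reader (simplified)."""
    files = tf.data.Dataset.list_files(file_pattern, shuffle=True)
    ds = files.interleave(tf.data.TFRecordDataset,
                          num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(decoder_fn, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(parser_fn, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    if postprocess_fn is not None:
        ds = ds.map(postprocess_fn, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.prefetch(tf.data.AUTOTUNE)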
Example #9
    def build_inputs(self, params, input_context=None):
        """Build input dataset."""
        decoder_cfg = params.decoder.get()
        if params.decoder.type == 'simple_decoder':
            decoder = tf_example_decoder.TfExampleDecoder(
                include_mask=self._task_config.model.include_mask,
                regenerate_source_id=decoder_cfg.regenerate_source_id,
                mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
        elif params.decoder.type == 'label_map_decoder':
            decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
                label_map=decoder_cfg.label_map,
                include_mask=self._task_config.model.include_mask,
                regenerate_source_id=decoder_cfg.regenerate_source_id,
                mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
        else:
            raise ValueError('Unknown decoder type: {}!'.format(
                params.decoder.type))

        parser = maskrcnn_input.Parser(
            output_size=self.task_config.model.input_size[:2],
            min_level=self.task_config.model.min_level,
            max_level=self.task_config.model.max_level,
            num_scales=self.task_config.model.anchor.num_scales,
            aspect_ratios=self.task_config.model.anchor.aspect_ratios,
            anchor_size=self.task_config.model.anchor.anchor_size,
            dtype=params.dtype,
            rpn_match_threshold=params.parser.rpn_match_threshold,
            rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
            rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
            rpn_fg_fraction=params.parser.rpn_fg_fraction,
            aug_rand_hflip=params.parser.aug_rand_hflip,
            aug_scale_min=params.parser.aug_scale_min,
            aug_scale_max=params.parser.aug_scale_max,
            skip_crowd_during_training=params.parser.
            skip_crowd_during_training,
            max_num_instances=params.parser.max_num_instances,
            include_mask=self._task_config.model.include_mask,
            mask_crop_size=params.parser.mask_crop_size)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))
        dataset = reader.read(input_context=input_context)

        return dataset
Example #10
    def build_inputs(
            self,
            params: exp_cfg.DataConfig,
            input_context: Optional[tf.distribute.InputContext] = None):
        """Builds classification input."""

        ignore_label = self.task_config.losses.ignore_label

        if params.tfds_name:
            if params.tfds_name in tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP:
                decoder = tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP[
                    params.tfds_name]()
            else:
                raise ValueError('TFDS {} is not supported'.format(
                    params.tfds_name))
        else:
            decoder = segmentation_input.Decoder()

        parser = segmentation_input.Parser(
            output_size=params.output_size,
            crop_size=params.crop_size,
            ignore_label=ignore_label,
            resize_eval_groundtruth=params.resize_eval_groundtruth,
            groundtruth_padded_size=params.groundtruth_padded_size,
            aug_scale_min=params.aug_scale_min,
            aug_scale_max=params.aug_scale_max,
            aug_rand_hflip=params.aug_rand_hflip,
            aug_policy=params.aug_policy,
            randaug_magnitude=params.randaug_magnitude,
            randaug_available_ops=params.randaug_available_ops,
            preserve_aspect_ratio=params.preserve_aspect_ratio,
            rotate_min=params.rotate_min,
            rotate_max=params.rotate_max,
            bright_min=params.bright_min,
            bright_max=params.bright_max,
            dtype=params.dtype)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
Example #11
    def build_inputs(
        self,
        params: exp_cfg.DataConfig,
        input_context: Optional[tf.distribute.InputContext] = None
    ) -> tf.data.Dataset:
        """Builds classification input."""

        num_classes = self.task_config.model.num_classes
        input_size = self.task_config.model.input_size
        image_field_key = self.task_config.train_data.image_field_key
        label_field_key = self.task_config.train_data.label_field_key
        is_multilabel = self.task_config.train_data.is_multilabel

        if params.tfds_name:
            if params.tfds_name in tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP:
                decoder = tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP[
                    params.tfds_name]()
            else:
                raise ValueError('TFDS {} is not supported'.format(
                    params.tfds_name))
        else:
            decoder = classification_input.Decoder(
                image_field_key=image_field_key,
                label_field_key=label_field_key,
                is_multilabel=is_multilabel)

        parser = classification_input.Parser(
            output_size=input_size[:2],
            num_classes=num_classes,
            image_field_key=image_field_key,
            label_field_key=label_field_key,
            aug_rand_hflip=params.aug_rand_hflip,
            aug_type=params.aug_type,
            is_multilabel=is_multilabel,
            dtype=params.dtype)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=input_context)

        return dataset
Example #12
  def build_inputs(self,
                   params: exp_cfg.DataConfig,
                   input_context: Optional[tf.distribute.InputContext] = None):
    """Builds yolo input."""

    if params.tfds_name:
      raise ValueError('TFDS {} is not supported'.format(params.tfds_name))
    else:
      decoder = yolo_input.Decoder(is_bbox_in_pixels=params.is_bbox_in_pixels,
                                   is_xywh=params.is_xywh)

    model_params = self.task_config.model

    parser = yolo_input.Parser(
        output_size=params.output_size,
        input_size=model_params.input_size,
        anchor_per_scale=model_params.head.anchor_per_scale,
        num_classes=model_params.num_classes,
        max_bbox_per_scale=params.max_bbox_per_scale,
        strides=model_params.head.strides,
        anchors=model_params.head.anchors,
        aug_policy=params.aug_policy,
        randaug_magnitude=params.randaug_magnitude,
        randaug_available_ops=params.randaug_available_ops,
        aug_rand_hflip=params.aug_rand_hflip,
        aug_scale_min=params.aug_scale_min,
        aug_scale_max=params.aug_scale_max,
        preserve_aspect_ratio=params.preserve_aspect_ratio,
        aug_jitter_im=params.aug_jitter_im,
        aug_jitter_boxes=params.aug_jitter_boxes,
        dtype=params.dtype)

    reader = input_reader_factory.input_reader_generator(
        params,
        dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
        decoder_fn=decoder.decode,
        parser_fn=parser.parse_fn(params.is_training))

    dataset = reader.read(input_context=input_context)

    return dataset
Example #13
    def build_inputs(
            self,
            params: exp_cfg.DataConfig,
            input_context: Optional[tf.distribute.InputContext] = None):
        """Builds classification input."""

        parser = video_input.Parser(input_params=params,
                                    image_key=params.image_field_key,
                                    label_key=params.label_field_key)
        postprocess_fn = video_input.PostBatchProcessor(params)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=self._get_dataset_fn(params),
            decoder_fn=self._get_decoder_fn(params),
            parser_fn=parser.parse_fn(params.is_training),
            postprocess_fn=postprocess_fn)

        dataset = reader.read(input_context=input_context)

        return dataset
Example #14
  def build_inputs(self, params, input_context=None):
    """Builds classification input."""

    num_classes = self.task_config.model.num_classes
    input_size = self.task_config.model.input_size
    image_field_key = self.task_config.train_data.image_field_key
    label_field_key = self.task_config.train_data.label_field_key
    is_multilabel = self.task_config.train_data.is_multilabel

    if params.tfds_name:
      decoder = tfds_factory.get_classification_decoder(params.tfds_name)
    else:
      decoder = classification_input_base.Decoder(
          image_field_key=image_field_key,
          label_field_key=label_field_key,
          is_multilabel=is_multilabel)

    parser = classification_input.Parser(
        output_size=input_size[:2],
        num_classes=num_classes,
        image_field_key=image_field_key,
        label_field_key=label_field_key,
        decode_jpeg_only=params.decode_jpeg_only,
        aug_rand_hflip=params.aug_rand_hflip,
        aug_type=params.aug_type,
        is_multilabel=is_multilabel,
        dtype=params.dtype)

    reader = input_reader_factory.input_reader_generator(
        params,
        dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
        decoder_fn=decoder.decode,
        parser_fn=parser.parse_fn(params.is_training))

    dataset = reader.read(input_context=input_context)
    return dataset
Example #15
    def test_yolo_input(self, is_training, should_output_image=False):

        params = DataConfig(input_path='D:/data/whizz_tf/detect_env*',
                            output_size=[256, 256],
                            global_batch_size=1,
                            is_training=is_training,
                            max_bbox_per_scale=150,
                            is_bbox_in_pixels=False,
                            is_xywh=True)

        model_params = YoloModel(num_classes=6,
                                 input_size=[256, 256, 3],
                                 head=YoloHead(anchor_per_scale=3,
                                               strides=[16, 32, 64],
                                               anchors=[
                                                   12, 16, 19, 36, 40, 28, 36,
                                                   75, 76, 55, 72, 146, 142,
                                                   110, 192, 243, 459, 401
                                               ],
                                               xy_scale=[1.2, 1.1, 1.05]))

        decoder = yolo_input.Decoder(
            is_bbox_in_pixels=params.is_bbox_in_pixels, is_xywh=params.is_xywh)
        parser = yolo_input.Parser(
            output_size=params.output_size,
            input_size=model_params.input_size,
            anchor_per_scale=model_params.head.anchor_per_scale,
            num_classes=model_params.num_classes,
            max_bbox_per_scale=params.max_bbox_per_scale,
            strides=model_params.head.strides,
            anchors=model_params.head.anchors,
            aug_policy=params.aug_policy,
            randaug_magnitude=params.randaug_magnitude,
            randaug_available_ops=params.randaug_available_ops,
            aug_rand_hflip=params.aug_rand_hflip,
            aug_scale_min=params.aug_scale_min,
            aug_scale_max=params.aug_scale_max,
            preserve_aspect_ratio=params.preserve_aspect_ratio,
            aug_jitter_im=params.aug_jitter_im,
            aug_jitter_boxes=params.aug_jitter_boxes,
            dtype=params.dtype)

        reader = input_reader_factory.input_reader_generator(
            params,
            dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
            decoder_fn=decoder.decode,
            parser_fn=parser.parse_fn(params.is_training))

        dataset = reader.read(input_context=None).take(1)

        sample = tf.data.experimental.get_single_element(dataset)
        image, target = sample

        if should_output_image:
            output_image = tf.image.convert_image_dtype(tf.squeeze(image[0]),
                                                        dtype=tf.uint8)
            output_image = tf.image.encode_png(output_image)
            tf.io.write_file('D:/Desktop/test.png', output_image)

        self.assertAllEqual(image.shape, (1, 256, 256, 3))

        for i in range(len(target['labels'])):
            self.assertAllEqual(target['labels'][i].shape[-2:], (3, 11))

            self.assertTrue(tf.reduce_all(target['labels'][i] >= 0))
            self.assertTrue(
                tf.reduce_all(target['labels'][i][:, :, :, :, :4] <= 256))
            self.assertTrue(
                tf.reduce_all(target['labels'][i][:, :, :, :, 4:] <= 1))

            print(
                'target boxes for scale %d contain values > 1: %s' %
                (i,
                 tf.math.reduce_any(target['labels'][i][:, :, :, :, :4] > 1)))

            self.assertAllEqual(target['bboxes'][i].shape[-2:], (150, 4))
            self.assertTrue(
                tf.reduce_all(
                    tf.math.logical_and(target['bboxes'][i] >= 0,
                                        target['bboxes'][i] <= 256)))
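The flat anchors list in this test holds 18 values, presumably three (width, height) pairs per scale across the three strides. A small sketch of that interpretation (the layout is an assumption inferred from anchor_per_scale=3 and the three strides above):

import numpy as np

anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146,
           142, 110, 192, 243, 459, 401]
# Reshape to (num_scales, anchor_per_scale, 2) = (3, 3, 2).
anchor_grid = np.array(anchors).reshape(3, 3, 2)
print(anchor_grid[0])  # anchors for the first scale: [[12 16] [19 36] [40 28]]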