Code Example #1
# Assumed imports, following the TF Model Garden (tf-models-official) layout;
# module paths may differ between releases.
import dataclasses
from typing import Optional

from official.core import config_definitions as cfg
from official.vision.configs import common


@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training."""
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = True
  dtype: str = 'float32'
  shuffle_buffer_size: int = 10000
  cycle_length: int = 10
  is_multilabel: bool = False
  aug_rand_hflip: bool = True
  aug_crop: Optional[bool] = True
  aug_type: Optional[
      common.Augmentation] = None  # Choose from AutoAugment and RandAugment.
  color_jitter: float = 0.
  random_erasing: Optional[common.RandomErasing] = None
  file_type: str = 'tfrecord'
  image_field_key: str = 'image/encoded'
  label_field_key: str = 'image/class/label'
  decode_jpeg_only: bool = True
  mixup_and_cutmix: Optional[common.MixupAndCutmix] = None
  decoder: Optional[common.DataDecoder] = common.DataDecoder()

  # Keep for backward compatibility.
  aug_policy: Optional[str] = None  # None, 'autoaug', or 'randaug'.
  randaug_magnitude: Optional[int] = 10
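
For context, here is a minimal usage sketch of the config above. The field names come from the class itself; the paths and batch sizes are placeholders, not values from the original source.

# Usage sketch (placeholder paths and illustrative values only).
train_data = DataConfig(
    input_path='/path/to/train*.tfrecord',
    global_batch_size=1024,
    is_training=True,
)
eval_data = DataConfig(
    input_path='/path/to/val*.tfrecord',
    global_batch_size=256,
    is_training=False,
)
print(train_data.global_batch_size)  # 1024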
Code Example #2
# Requires the same imports as Code Example #1, plus `from typing import List`.
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
    """Input config for training."""
    output_size: List[int] = dataclasses.field(default_factory=list)
    # If crop_size is specified, the image is first resized to output_size and
    # then a crop of size crop_size is taken.
    crop_size: List[int] = dataclasses.field(default_factory=list)
    input_path: str = ''
    global_batch_size: int = 0
    is_training: bool = True
    dtype: str = 'float32'
    shuffle_buffer_size: int = 1000
    cycle_length: int = 10
    # If resize_eval_groundtruth is set to False, original image sizes are used
    # for eval. In that case, groundtruth_padded_size must also be specified so
    # that the variable-sized inputs can be batched.
    resize_eval_groundtruth: bool = True
    groundtruth_padded_size: List[int] = dataclasses.field(
        default_factory=list)
    aug_scale_min: float = 1.0
    aug_scale_max: float = 1.0
    aug_rand_hflip: bool = True
    preserve_aspect_ratio: bool = True
    aug_policy: Optional[str] = None
    drop_remainder: bool = True
    file_type: str = 'tfrecord'
    decoder: Optional[common.DataDecoder] = common.DataDecoder()
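
To illustrate the constraint noted in the comments above, a hedged eval-time sketch follows: when resize_eval_groundtruth is False, groundtruth_padded_size must also be set so variable-sized images can be batched. The sizes and path are illustrative placeholders, not values from the original source.

# Eval-config sketch (placeholder path, illustrative sizes).
eval_data = DataConfig(
    input_path='/path/to/val*.tfrecord',
    global_batch_size=8,
    is_training=False,
    output_size=[1024, 1024],
    resize_eval_groundtruth=False,
    groundtruth_padded_size=[1280, 1280],
    drop_remainder=False,
)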
Code Example #3
# Requires the same imports as Code Example #1. `Parser` is assumed to be the
# parser config dataclass defined alongside this class in the same module.
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
    """Input config for training."""
    input_path: str = ''
    global_batch_size: int = 0
    is_training: bool = False
    dtype: str = 'bfloat16'
    decoder: common.DataDecoder = common.DataDecoder()
    parser: Parser = Parser()
    shuffle_buffer_size: int = 10000
    file_type: str = 'tfrecord'
Code Example #4
# Requires the same imports as Code Example #1.
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
    """Input config for training."""
    input_path: str = ''
    tfds_name: str = ''
    tfds_split: str = 'train'
    global_batch_size: int = 0
    is_training: bool = False
    dtype: str = 'bfloat16'
    decoder: common.DataDecoder = common.DataDecoder()
    shuffle_buffer_size: int = 10000
    file_type: str = 'tfrecord'
    drop_remainder: bool = True
Code Example #5
# Requires the same imports as Code Example #1. `Parser` is assumed to be the
# parser config dataclass defined alongside this class in the same module.
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training."""
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = False
  dtype: str = 'bfloat16'
  decoder: common.DataDecoder = common.DataDecoder()
  parser: Parser = Parser()
  shuffle_buffer_size: int = 10000
  file_type: str = 'tfrecord'
  drop_remainder: bool = True
  # Number of examples in the dataset; used to create the annotation file.
  num_examples: int = -1
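
Finally, a short sketch of how num_examples might be set on an eval config for this variant, per the comment above; the path and example count are placeholders, not values from the original source.

# Eval-config sketch (placeholder path; num_examples value is illustrative).
eval_data = DataConfig(
    input_path='/path/to/val*.tfrecord',
    global_batch_size=8,
    is_training=False,
    drop_remainder=False,
    num_examples=5000,
)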