Example #1
def simclr_finetuning() -> cfg.ExperimentConfig:
    """Image classification general."""
    return cfg.ExperimentConfig(task=SimCLRFinetuneTask(),
                                trainer=cfg.TrainerConfig(),
                                restrictions=[
                                    'task.train_data.is_training != None',
                                    'task.validation_data.is_training != None'
                                ])
def image_classification() -> cfg.ExperimentConfig:
    """Image classification general."""
    return cfg.ExperimentConfig(task=ImageClassificationTask(),
                                trainer=cfg.TrainerConfig(),
                                restrictions=[
                                    'task.train_data.is_training != None',
                                    'task.validation_data.is_training != None'
                                ])
def semantic_segmentation() -> cfg.ExperimentConfig:
    """Semantic segmentation general."""
    return cfg.ExperimentConfig(task=SemanticSegmentationTask(),
                                trainer=cfg.TrainerConfig(),
                                restrictions=[
                                    'task.train_data.is_training != None',
                                    'task.validation_data.is_training != None'
                                ])
def seg_unet3d_test() -> cfg.ExperimentConfig:
  """Image segmentation on a dummy dataset with 3D UNet for testing purpose."""
  train_batch_size = 2
  eval_batch_size = 2
  steps_per_epoch = 10
  config = cfg.ExperimentConfig(
      task=SemanticSegmentation3DTask(
          model=SemanticSegmentationModel3D(
              num_classes=2,
              input_size=[32, 32, 32],
              num_channels=2,
              backbone=backbones.Backbone(
                  type='unet_3d', unet_3d=backbones.UNet3D(model_id=2)),
              decoder=decoders.Decoder(
                  type='unet_3d_decoder',
                  unet_3d_decoder=decoders.UNet3DDecoder(model_id=2)),
              head=SegmentationHead3D(num_convs=0, num_classes=2),
              norm_activation=common.NormActivation(
                  activation='relu', use_sync_bn=False)),
          train_data=DataConfig(
              input_path='train.tfrecord',
              num_classes=2,
              input_size=[32, 32, 32],
              num_channels=2,
              is_training=True,
              global_batch_size=train_batch_size),
          validation_data=DataConfig(
              input_path='val.tfrecord',
              num_classes=2,
              input_size=[32, 32, 32],
              num_channels=2,
              is_training=False,
              global_batch_size=eval_batch_size),
          losses=Losses(loss_type='adaptive')),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=10,
          validation_steps=10,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
              },
              'learning_rate': {
                  'type': 'constant',
                  'constant': {
                      'learning_rate': 0.000001
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config
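Factory functions like these are typically exposed through the Model Garden experiment registry so an experiment can be selected by name. A minimal sketch of registration and lookup, assuming the official.core.exp_factory API; the registry name is hypothetical:

from official.core import exp_factory

# Registration is usually done by decorating the factory itself with
# @exp_factory.register_config_factory('seg_unet3d_test'); applying the
# decorator after the fact is equivalent:
exp_factory.register_config_factory('seg_unet3d_test')(seg_unet3d_test)

config = exp_factory.get_exp_config('seg_unet3d_test')
print(config.trainer.train_steps)  # -> 10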
Example #5
def centernet_hourglass_coco() -> cfg.ExperimentConfig:
  """COCO object detection with CenterNet."""
  train_batch_size = 128
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size

  config = cfg.ExperimentConfig(
      task=CenterNetTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=CenterNetModel(),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(),
              shuffle_buffer_size=2),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              shuffle_buffer_size=2),
      ),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=150 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'adam',
                  'adam': {
                      'epsilon': 1e-7
                  }
              },
              'learning_rate': {
                  'type': 'cosine',
                  'cosine': {
                      'initial_learning_rate': 0.001,
                      'decay_steps': 150 * steps_per_epoch
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config
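A returned ExperimentConfig can be adjusted without editing the factory. A minimal sketch, assuming the override() method these configs inherit from ParamsDict; the override values are illustrative:

config = centernet_hourglass_coco()
# Nested fields are overridden with a nested dict; is_strict=True rejects
# keys that do not already exist in the config.
config.override({
    'task': {'train_data': {'global_batch_size': 64}},
    'trainer': {'train_steps': 10000},
}, is_strict=True)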
Example #6
def basnet() -> cfg.ExperimentConfig:
  """BASNet general."""
  return cfg.ExperimentConfig(
      task=BASNetModel(),
      trainer=cfg.TrainerConfig(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
def video_classification() -> cfg.ExperimentConfig:
  """Video classification general."""
  return cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
      task=VideoClassificationTask(),
      trainer=cfg.TrainerConfig(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None',
          'task.train_data.num_classes == task.validation_data.num_classes',
      ])
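The mixed_precision_dtype='bfloat16' runtime field does not change the model code; the training driver reads it and sets the global Keras precision policy. A minimal sketch of that mapping (an assumption about the driver, not shown in the snippet above):

import tensorflow as tf

config = video_classification()
if config.runtime.mixed_precision_dtype == 'bfloat16':
  # Standard Keras mixed-precision policy for bfloat16 compute.
  tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')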
Example #8
def detr_coco() -> cfg.ExperimentConfig:
    """Config to get results that matches the paper."""
    train_batch_size = 64
    eval_batch_size = 64
    num_train_data = 118287
    num_steps_per_epoch = num_train_data // train_batch_size
    train_steps = 500 * num_steps_per_epoch  # 500 epochs
    decay_at = train_steps - 100 * num_steps_per_epoch  # 400 epochs
    config = cfg.ExperimentConfig(
        task=DetectionConfig(
            train_data=coco.COCODataConfig(
                tfds_name='coco/2017',
                tfds_split='train',
                is_training=True,
                global_batch_size=train_batch_size,
                shuffle_buffer_size=1000),
            validation_data=coco.COCODataConfig(
                tfds_name='coco/2017',
                tfds_split='validation',
                is_training=False,
                global_batch_size=eval_batch_size,
                drop_remainder=False)),
        trainer=cfg.TrainerConfig(
            train_steps=train_steps,
            validation_steps=-1,
            steps_per_loop=10000,
            summary_interval=10000,
            checkpoint_interval=10000,
            validation_interval=10000,
            max_to_keep=1,
            best_checkpoint_export_subdir='best_ckpt',
            best_checkpoint_eval_metric='AP',
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'detr_adamw',
                    'detr_adamw': {
                        'weight_decay_rate': 1e-4,
                        'global_clipnorm': 0.1,
                        # Avoid AdamW legacy behavior.
                        'gradient_clip_norm': 0.0
                    }
                },
                'learning_rate': {
                    'type': 'stepwise',
                    'stepwise': {
                        'boundaries': [decay_at],
                        'values': [0.0001, 1.0e-05]
                    }
                },
            })),
        restrictions=[
            'task.train_data.is_training != None',
        ])
    return config
  def setUp(self):
    super().setUp()
    self._config = cfg.ExperimentConfig(trainer=cfg.TrainerConfig(
        optimizer_config=cfg.OptimizationConfig({
            'optimizer': {
                'type': 'sgd'
            },
            'learning_rate': {
                'type': 'constant'
            }
        })))
def tf_vision_example_experiment() -> cfg.ExperimentConfig:
    """Definition of a full example experiment."""
    train_batch_size = 256
    eval_batch_size = 256
    steps_per_epoch = 10
    config = cfg.ExperimentConfig(
        task=ExampleTask(model=ExampleModel(num_classes=10,
                                            input_size=[128, 128, 3]),
                         losses=Losses(l2_weight_decay=1e-4),
                         train_data=ExampleDataConfig(
                             input_path='/path/to/train*',
                             is_training=True,
                             global_batch_size=train_batch_size),
                         validation_data=ExampleDataConfig(
                             input_path='/path/to/valid*',
                             is_training=False,
                             global_batch_size=eval_batch_size)),
        trainer=cfg.TrainerConfig(
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            train_steps=90 * steps_per_epoch,
            validation_steps=steps_per_epoch,
            validation_interval=steps_per_epoch,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd',
                    'sgd': {
                        'momentum': 0.9
                    }
                },
                'learning_rate': {
                    'type': 'cosine',
                    'cosine': {
                        'initial_learning_rate': 1.6,
                        'decay_steps': 350 * steps_per_epoch
                    }
                },
                'warmup': {
                    'type': 'linear',
                    'linear': {
                        'warmup_steps': 5 * steps_per_epoch,
                        'warmup_learning_rate': 0
                    }
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])

    return config
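A config like this is usually serialized next to the experiment so runs are reproducible. A minimal sketch, assuming the Model Garden params_dict YAML helpers; the output path is hypothetical:

from official.modeling.hyperparams import params_dict

config = tf_vision_example_experiment()
# ExperimentConfig is a ParamsDict subclass, so it round-trips through YAML.
params_dict.save_params_dict_to_yaml(config, '/tmp/example_experiment.yaml')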
Example #11
class MultiEvalExperimentConfig(base_config.Config):
  """An experiment config for single-task training and multi-task evaluation.

  Attributes:
    task: the single-stream training task.
    eval_tasks: individual evaluation tasks.
    trainer: the trainer configuration.
    runtime: the runtime configuration.
  """
  task: cfg.TaskConfig = cfg.TaskConfig()
  eval_tasks: MultiTaskConfig = MultiTaskConfig()
  trainer: cfg.TrainerConfig = cfg.TrainerConfig()
  runtime: cfg.RuntimeConfig = cfg.RuntimeConfig()
def image_classification_imagenet_vit_finetune() -> cfg.ExperimentConfig:
    """Image classification on imagenet with vision transformer."""
    train_batch_size = 512
    eval_batch_size = 512
    steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
    config = cfg.ExperimentConfig(
        task=ImageClassificationTask(
            model=ImageClassificationModel(
                num_classes=1001,
                input_size=[384, 384, 3],
                backbone=backbones.Backbone(
                    type='vit',
                    vit=backbones.VisionTransformer(model_name='vit-b16'))),
            losses=Losses(l2_weight_decay=0.0),
            train_data=DataConfig(input_path=os.path.join(
                IMAGENET_INPUT_PATH_BASE, 'train*'),
                                  is_training=True,
                                  global_batch_size=train_batch_size),
            validation_data=DataConfig(input_path=os.path.join(
                IMAGENET_INPUT_PATH_BASE, 'valid*'),
                                       is_training=False,
                                       global_batch_size=eval_batch_size)),
        trainer=cfg.TrainerConfig(
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            train_steps=20000,
            validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
            validation_interval=steps_per_epoch,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd',
                    'sgd': {
                        'momentum': 0.9,
                        'global_clipnorm': 1.0,
                    }
                },
                'learning_rate': {
                    'type': 'cosine',
                    'cosine': {
                        'initial_learning_rate': 0.003,
                        'decay_steps': 20000,
                    }
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])

    return config
Example #13
File: yt8m.py (project: shen-zc/models)
def add_trainer(
    experiment: cfg.ExperimentConfig,
    train_batch_size: int,
    eval_batch_size: int,
    learning_rate: float = 0.0001,
    train_epochs: int = 50,
    num_train_examples: int = YT8M_TRAIN_EXAMPLES,
    num_val_examples: int = YT8M_VAL_EXAMPLES,
):
    """Add and config a trainer to the experiment config."""
    if num_train_examples <= 0:
        raise ValueError('Wrong train dataset size {!r}'.format(
            experiment.task.train_data))
    if num_val_examples <= 0:
        raise ValueError('Wrong validation dataset size {!r}'.format(
            experiment.task.validation_data))
    experiment.task.train_data.global_batch_size = train_batch_size
    experiment.task.validation_data.global_batch_size = eval_batch_size
    steps_per_epoch = num_train_examples // train_batch_size
    steps_per_loop = 500
    experiment.trainer = cfg.TrainerConfig(
        steps_per_loop=steps_per_loop,
        summary_interval=steps_per_loop,
        checkpoint_interval=steps_per_loop,
        train_steps=train_epochs * steps_per_epoch,
        validation_steps=num_val_examples // eval_batch_size,
        validation_interval=steps_per_loop,
        optimizer_config=optimization.OptimizationConfig({
            'optimizer': {
                'type': 'adam',
                'adam': {}
            },
            'learning_rate': {
                'type': 'exponential',
                'exponential': {
                    'initial_learning_rate': learning_rate,
                    'decay_rate': 0.95,
                    'decay_steps': int(steps_per_epoch * 1.5),
                    'offset': 500,
                }
            },
            'warmup': {
                'linear': {
                    'name': 'linear',
                    'warmup_learning_rate': 0,
                    'warmup_steps': 500,
                },
                'type': 'linear',
            }
        }))
    return experiment
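The exponential schedule above only starts decaying after the 500-step offset, which matches the 500-step linear warmup. A rough plain-Python sketch of the resulting curve; the decay_steps default below is illustrative (the config uses int(steps_per_epoch * 1.5)), and holding the rate flat before the offset is an assumption, since warmup covers those steps anyway:

def exponential_lr(step, initial_lr=0.0001, decay_rate=0.95,
                   decay_steps=1500, offset=500):
  if step < offset:
    return initial_lr  # warmup territory
  return initial_lr * decay_rate ** ((step - offset) / decay_steps)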
Example #14
def unified_detector() -> cfg.ExperimentConfig:
    """Configurations for trainer of unified detector."""
    total_train_steps = 100000
    summary_interval = steps_per_loop = 200
    checkpoint_interval = 2000
    warmup_steps = 1000
    config = cfg.ExperimentConfig(
        # Input pipeline and model are configured through Gin.
        task=OcrTaskConfig(train_data=cfg.DataConfig(is_training=True)),
        trainer=cfg.TrainerConfig(
            train_steps=total_train_steps,
            steps_per_loop=steps_per_loop,
            summary_interval=summary_interval,
            checkpoint_interval=checkpoint_interval,
            max_to_keep=1,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'adamw',
                    'adamw': {
                        'weight_decay_rate': 0.05,
                        'include_in_weight_decay': [
                            '^((?!depthwise).)*(kernel|weights):0$',
                        ],
                        'exclude_from_weight_decay': [
                            '(^((?!kernel).)*:0)|(depthwise_kernel)',
                        ],
                        'gradient_clip_norm': 10.,
                    },
                },
                'learning_rate': {
                    'type': 'cosine',
                    'cosine': {
                        'initial_learning_rate': 1e-3,
                        'decay_steps': total_train_steps - warmup_steps,
                        'alpha': 1e-2,
                        'offset': warmup_steps,
                    },
                },
                'warmup': {
                    'type': 'linear',
                    'linear': {
                        'warmup_learning_rate': 1e-5,
                        'warmup_steps': warmup_steps,
                    }
                },
            }),
        ),
    )
    return config
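The include/exclude regexes above decide weight decay per variable name. A minimal plain-Python sketch of that partition; the variable names are hypothetical, and the include-first precedence mirrors how AdamW-style optimizers commonly apply these lists (an assumption, not shown above):

import re

INCLUDE = r'^((?!depthwise).)*(kernel|weights):0$'
EXCLUDE = r'(^((?!kernel).)*:0)|(depthwise_kernel)'

def use_weight_decay(name):
  if re.search(INCLUDE, name):  # dense kernels and weights decay
    return True
  if re.search(EXCLUDE, name):  # biases, norms, depthwise kernels do not
    return False
  return True

assert use_weight_decay('dense/kernel:0')
assert not use_weight_decay('conv/depthwise_kernel:0')
assert not use_weight_decay('layer_norm/beta:0')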
Example #15
def teams_pretrain() -> cfg.ExperimentConfig:
  """TEAMS pretraining."""
  config = cfg.ExperimentConfig(
      task=teams_task.TeamsPretrainTaskConfig(
          train_data=pretrain_dataloader.BertPretrainDataConfig(),
          validation_data=pretrain_dataloader.BertPretrainDataConfig(
              is_training=False)),
      trainer=cfg.TrainerConfig(
          optimizer_config=TeamsOptimizationConfig(), train_steps=1000000),
      restrictions=[
          "task.train_data.is_training != None",
          "task.validation_data.is_training != None"
      ])
  return config
Example #16
def teams_squad() -> cfg.ExperimentConfig:
  """Teams Squad V1/V2."""
  config = cfg.ExperimentConfig(
      task=question_answering.QuestionAnsweringConfig(
          model=question_answering.ModelConfig(
              encoder=encoders.EncoderConfig(
                  type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
          train_data=question_answering_dataloader.QADataConfig(),
          validation_data=question_answering_dataloader.QADataConfig()),
      trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
      restrictions=[
          "task.train_data.is_training != None",
          "task.validation_data.is_training != None"
      ])
  return config
Example #17
def yt8m_experiment() -> cfg.ExperimentConfig:
    """Video classification general."""
    exp_config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
        task=YT8MTask(),
        trainer=cfg.TrainerConfig(),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None',
            'task.train_data.num_classes == task.validation_data.num_classes',
            'task.train_data.feature_sizes != None',
            'task.train_data.feature_names != None',
        ])

    return add_trainer(exp_config, train_batch_size=512, eval_batch_size=512)
def add_trainer(experiment: cfg.ExperimentConfig,
                train_batch_size: int,
                eval_batch_size: int,
                learning_rate: float = 1.6,
                train_epochs: int = 44,
                warmup_epochs: int = 5):
  """Add and config a trainer to the experiment config."""
  if experiment.task.train_data.num_examples <= 0:
    raise ValueError('Wrong train dataset size {!r}'.format(
        experiment.task.train_data))
  if experiment.task.validation_data.num_examples <= 0:
    raise ValueError('Wrong validation dataset size {!r}'.format(
        experiment.task.validation_data))
  experiment.task.train_data.global_batch_size = train_batch_size
  experiment.task.validation_data.global_batch_size = eval_batch_size
  steps_per_epoch = experiment.task.train_data.num_examples // train_batch_size
  experiment.trainer = cfg.TrainerConfig(
      steps_per_loop=steps_per_epoch,
      summary_interval=steps_per_epoch,
      checkpoint_interval=steps_per_epoch,
      train_steps=train_epochs * steps_per_epoch,
      validation_steps=experiment.task.validation_data.num_examples //
      eval_batch_size,
      validation_interval=steps_per_epoch,
      optimizer_config=optimization.OptimizationConfig({
          'optimizer': {
              'type': 'sgd',
              'sgd': {
                  'momentum': 0.9,
                  'nesterov': True,
              }
          },
          'learning_rate': {
              'type': 'cosine',
              'cosine': {
                  'initial_learning_rate': learning_rate,
                  'decay_steps': train_epochs * steps_per_epoch,
              }
          },
          'warmup': {
              'type': 'linear',
              'linear': {
                  'warmup_steps': warmup_epochs * steps_per_epoch,
                  'warmup_learning_rate': 0
              }
          }
      }))
  return experiment
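At train time the OptimizationConfig dict is materialized into a concrete Keras optimizer. A minimal sketch, assuming the Model Garden OptimizerFactory API:

from official.modeling import optimization

exp = yt8m_experiment()
opt_factory = optimization.OptimizerFactory(exp.trainer.optimizer_config)
lr_schedule = opt_factory.build_learning_rate()  # cosine + linear warmup
optimizer = opt_factory.build_optimizer(lr_schedule)  # Nesterov SGD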
Example #19
def teams_sentence_prediction() -> cfg.ExperimentConfig:
  r"""Teams GLUE."""
  config = cfg.ExperimentConfig(
      task=sentence_prediction.SentencePredictionConfig(
          model=sentence_prediction.ModelConfig(
              encoder=encoders.EncoderConfig(
                  type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
          train_data=sentence_prediction_dataloader
          .SentencePredictionDataConfig(),
          validation_data=sentence_prediction_dataloader
          .SentencePredictionDataConfig(
              is_training=False, drop_remainder=False)),
      trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
      restrictions=[
          "task.train_data.is_training != None",
          "task.validation_data.is_training != None"
      ])
  return config
  def test_export_best_ckpt(self, distribution):
    config = cfg.ExperimentConfig(trainer=cfg.TrainerConfig(
        best_checkpoint_export_subdir='best_ckpt',
        best_checkpoint_eval_metric='acc',
        optimizer_config=cfg.OptimizationConfig({
            'optimizer': {
                'type': 'sgd'
            },
            'learning_rate': {
                'type': 'constant'
            }
        })))
    model_dir = self.get_temp_dir()
    trainer = self.create_test_trainer(config, model_dir=model_dir)
    trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
    trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
    self.assertTrue(
        tf.io.gfile.exists(
            os.path.join(model_dir, 'best_ckpt', 'info.json')))
Example #21
def bert_classification_example() -> cfg.ExperimentConfig:
    """Return a minimum experiment config for Bert token classification."""
    return cfg.ExperimentConfig(
        task=ClassificationExampleConfig(),
        trainer=cfg.TrainerConfig(
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'adamw',
                },
                'learning_rate': {
                    'type': 'polynomial',
                },
                'warmup': {
                    'type': 'polynomial'
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])
def token_drop_bert_pretraining() -> cfg.ExperimentConfig:
    """BERT pretraining with token dropping."""
    config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(enable_xla=True),
        task=masked_lm.TokenDropMaskedLMConfig(
            model=bert.PretrainerConfig(encoder=encoders.EncoderConfig(
                any=encoder_config.TokenDropBertEncoderConfig(
                    vocab_size=30522, num_layers=1, token_keep_k=64),
                type='any')),
            train_data=pretrain_dataloader.BertPretrainDataConfig(),
            validation_data=pretrain_dataloader.BertPretrainDataConfig(
                is_training=False)),
        trainer=cfg.TrainerConfig(
            train_steps=1000000,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'adamw',
                    'adamw': {
                        'weight_decay_rate': 0.01,
                        'exclude_from_weight_decay':
                            ['LayerNorm', 'layer_norm', 'bias'],
                    }
                },
                'learning_rate': {
                    'type': 'polynomial',
                    'polynomial': {
                        'initial_learning_rate': 1e-4,
                        'end_learning_rate': 0.0,
                    }
                },
                'warmup': {
                    'type': 'polynomial'
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])
    return config
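With end_learning_rate=0.0 and the conventional default power of 1.0, the polynomial schedule above is a straight linear ramp from 1e-4 down to 0 over the million train steps. A plain-Python sketch of the standard polynomial-decay formula; decay_steps and power here are assumptions based on those defaults:

def polynomial_lr(step, initial_lr=1e-4, end_lr=0.0,
                  decay_steps=1_000_000, power=1.0):
  step = min(step, decay_steps)
  return (initial_lr - end_lr) * (1 - step / decay_steps) ** power + end_lr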
Example #23
def add_trainer(
    experiment: cfg.ExperimentConfig,
    train_batch_size: int,
    eval_batch_size: int,
    learning_rate: float = 0.005,
    train_epochs: int = 44,
):
    """Add and config a trainer to the experiment config."""
    if YT8M_TRAIN_EXAMPLES <= 0:
        raise ValueError('Wrong train dataset size {!r}'.format(
            experiment.task.train_data))
    if YT8M_VAL_EXAMPLES <= 0:
        raise ValueError('Wrong validation dataset size {!r}'.format(
            experiment.task.validation_data))
    experiment.task.train_data.global_batch_size = train_batch_size
    experiment.task.validation_data.global_batch_size = eval_batch_size
    steps_per_epoch = YT8M_TRAIN_EXAMPLES // train_batch_size
    experiment.trainer = cfg.TrainerConfig(
        steps_per_loop=steps_per_epoch,
        summary_interval=steps_per_epoch,
        checkpoint_interval=steps_per_epoch,
        train_steps=train_epochs * steps_per_epoch,
        validation_steps=YT8M_VAL_EXAMPLES // eval_batch_size,
        validation_interval=steps_per_epoch,
        optimizer_config=optimization.OptimizationConfig({
            'optimizer': {
                'type': 'adam',
                'adam': {}
            },
            'learning_rate': {
                'type': 'exponential',
                'exponential': {
                    'initial_learning_rate': learning_rate,
                    'decay_rate': 0.95,
                    'decay_steps': 1500000,
                }
            },
        }))
    return experiment
def bert_sentence_prediction_text() -> cfg.ExperimentConfig:
    r"""BERT sentence prediction with raw text data.

  Example: use tf.text and tfds as input with glue_mnli_text.yaml
  """
    config = cfg.ExperimentConfig(
        task=sentence_prediction.SentencePredictionConfig(
            train_data=sentence_prediction_dataloader.
            SentencePredictionTextDataConfig(),
            validation_data=sentence_prediction_dataloader.
            SentencePredictionTextDataConfig(is_training=False,
                                             drop_remainder=False)),
        trainer=cfg.TrainerConfig(
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'adamw',
                    'adamw': {
                        'weight_decay_rate': 0.01,
                        'exclude_from_weight_decay':
                            ['LayerNorm', 'layer_norm', 'bias'],
                    }
                },
                'learning_rate': {
                    'type': 'polynomial',
                    'polynomial': {
                        'initial_learning_rate': 3e-5,
                        'end_learning_rate': 0.0,
                    }
                },
                'warmup': {
                    'type': 'polynomial'
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])
    return config
Example #25
def roformer_glue() -> cfg.ExperimentConfig:
  r"""BigBird GLUE."""
  config = cfg.ExperimentConfig(
      task=sentence_prediction.SentencePredictionConfig(
          model=sentence_prediction.ModelConfig(
              encoder=encoders.EncoderConfig(
                  type='any', any=roformer.RoformerEncoderConfig())),
          train_data=sentence_prediction_dataloader
          .SentencePredictionDataConfig(),
          validation_data=sentence_prediction_dataloader
          .SentencePredictionDataConfig(
              is_training=False, drop_remainder=False)),
      trainer=cfg.TrainerConfig(
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'adamw',
                  'adamw': {
                      'weight_decay_rate': 0.01,
                      'exclude_from_weight_decay':
                          ['LayerNorm', 'layer_norm', 'bias'],
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 3e-5,
                      'end_learning_rate': 0.0,
                  }
              },
              'warmup': {
                  'type': 'polynomial'
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
Example #26
def labse_train() -> cfg.ExperimentConfig:
    r"""Language-agnostic bert sentence embedding.

  *Note*: this experiment does not use cross-accelerator global softmax so it
  does not reproduce the exact LABSE training.
  """
    config = cfg.ExperimentConfig(
        task=dual_encoder.DualEncoderConfig(
            train_data=dual_encoder_dataloader.DualEncoderDataConfig(),
            validation_data=dual_encoder_dataloader.DualEncoderDataConfig(
                is_training=False, drop_remainder=False)),
        trainer=cfg.TrainerConfig(optimizer_config=LaBSEOptimizationConfig(
            learning_rate=optimization.LrConfig(type="polynomial",
                                                polynomial=PolynomialLr(
                                                    initial_learning_rate=3e-5,
                                                    end_learning_rate=0.0)),
            warmup=optimization.WarmupConfig(
                type="polynomial", polynomial=PolynomialWarmupConfig()))),
        restrictions=[
            "task.train_data.is_training != None",
            "task.validation_data.is_training != None"
        ])
    return config
Example #27
  def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
    config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(
            mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
        trainer=cfg.TrainerConfig(
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                },
            })))
    trainer = self.create_test_trainer(config)
    if mixed_precision_dtype != 'float16':
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    elif mixed_precision_dtype == 'float16' and loss_scale is None:
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    else:
      self.assertIsInstance(trainer.optimizer,
                            tf.keras.mixed_precision.LossScaleOptimizer)

    metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
    self.assertIn('training_loss', metrics)
Example #28
def cascadercnn_spinenet_coco() -> cfg.ExperimentConfig:
  """COCO object detection with Cascade RCNN-RS with SpineNet backbone."""
  steps_per_epoch = 463
  coco_val_samples = 5000
  train_batch_size = 256
  eval_batch_size = 8

  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
      task=MaskRCNNTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=MaskRCNN(
              backbone=backbones.Backbone(
                  type='spinenet',
                  spinenet=backbones.SpineNet(
                      model_id='49',
                      min_level=3,
                      max_level=7,
                  )),
              decoder=decoders.Decoder(
                  type='identity', identity=decoders.Identity()),
              roi_sampler=ROISampler(cascade_iou_thresholds=[0.6, 0.7]),
              detection_head=DetectionHead(
                  class_agnostic_bbox_pred=True, cascade_class_ensemble=True),
              anchor=Anchor(anchor_size=3),
              norm_activation=common.NormActivation(
                  use_sync_bn=True, activation='swish'),
              num_classes=91,
              input_size=[640, 640, 3],
              min_level=3,
              max_level=7,
              include_mask=True),
          losses=Losses(l2_weight_decay=0.00004),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.5)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              drop_remainder=False)),
      trainer=cfg.TrainerConfig(
          train_steps=steps_per_epoch * 500,
          validation_steps=coco_val_samples // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      'boundaries': [
                          steps_per_epoch * 475, steps_per_epoch * 490
                      ],
                      'values': [0.32, 0.032, 0.0032],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None',
          'task.model.min_level == task.model.backbone.spinenet.min_level',
          'task.model.max_level == task.model.backbone.spinenet.max_level',
      ])
  return config
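The last two restrictions keep the task-level feature pyramid range and the SpineNet backbone in lockstep, so a partial override cannot silently desynchronize them. A minimal sketch of how a violation surfaces, assuming the validate() method these configs inherit evaluates the restriction strings:

config = cascadercnn_spinenet_coco()
config.task.model.max_level = 6  # backbone.spinenet.max_level is still 7
config.validate()  # raises: the max_level restriction no longer holds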
Example #29
def maskrcnn_resnetfpn_coco() -> cfg.ExperimentConfig:
  """COCO object detection with Mask R-CNN."""
  steps_per_epoch = 500
  coco_val_samples = 5000
  train_batch_size = 64
  eval_batch_size = 8

  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(
          mixed_precision_dtype='bfloat16', enable_xla=True),
      task=MaskRCNNTask(
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
          init_checkpoint_modules='backbone',
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=MaskRCNN(
              num_classes=91, input_size=[1024, 1024, 3], include_mask=True),
          losses=Losses(l2_weight_decay=0.00004),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.25)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              drop_remainder=False)),
      trainer=cfg.TrainerConfig(
          train_steps=22500,
          validation_steps=coco_val_samples // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      'boundaries': [15000, 20000],
                      'values': [0.12, 0.012, 0.0012],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 500,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
Example #30
def maskrcnn_spinenet_coco() -> cfg.ExperimentConfig:
    """COCO object detection with Mask R-CNN with SpineNet backbone."""
    steps_per_epoch = 463
    coco_val_samples = 5000

    config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
        task=MaskRCNNTask(
            annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                         'instances_val2017.json'),
            model=MaskRCNN(
                backbone=backbones.Backbone(
                    type='spinenet',
                    spinenet=backbones.SpineNet(model_id='49')),
                decoder=decoders.Decoder(type='identity',
                                         identity=decoders.Identity()),
                anchor=Anchor(anchor_size=3),
                norm_activation=common.NormActivation(use_sync_bn=True),
                num_classes=91,
                input_size=[640, 640, 3],
                min_level=3,
                max_level=7,
                include_mask=True),
            losses=Losses(l2_weight_decay=0.00004),
            train_data=DataConfig(input_path=os.path.join(
                COCO_INPUT_PATH_BASE, 'train*'),
                                  is_training=True,
                                  global_batch_size=256,
                                  parser=Parser(aug_rand_hflip=True,
                                                aug_scale_min=0.5,
                                                aug_scale_max=2.0)),
            validation_data=DataConfig(input_path=os.path.join(
                COCO_INPUT_PATH_BASE, 'val*'),
                                       is_training=False,
                                       global_batch_size=8)),
        trainer=cfg.TrainerConfig(
            train_steps=steps_per_epoch * 350,
            validation_steps=coco_val_samples // 8,
            validation_interval=steps_per_epoch,
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd',
                    'sgd': {
                        'momentum': 0.9
                    }
                },
                'learning_rate': {
                    'type': 'stepwise',
                    'stepwise': {
                        'boundaries': [steps_per_epoch * 320,
                                       steps_per_epoch * 340],
                        'values': [0.28, 0.028, 0.0028],
                    }
                },
                'warmup': {
                    'type': 'linear',
                    'linear': {
                        'warmup_steps': 2000,
                        'warmup_learning_rate': 0.0067
                    }
                }
            })),
        restrictions=[
            'task.train_data.is_training != None',
            'task.validation_data.is_training != None'
        ])
    return config
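End to end, these factory functions are what the Model Garden training driver consumes. A minimal sketch of running one, assuming official.core's task_factory and train_lib; the model directory and strategy choice are hypothetical:

import tensorflow as tf
from official.core import task_factory, train_lib

params = maskrcnn_spinenet_coco()
model_dir = '/tmp/maskrcnn_spinenet'  # hypothetical
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
    distribution_strategy=strategy,
    task=task,
    mode='train_and_eval',
    params=params,
    model_dir=model_dir)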