Example #1
 def test_train_step(self):
     config = detr_cfg.DetectionConfig(num_encoder_layers=1,
                                       num_decoder_layers=1,
                                       train_data=coco.COCODataConfig(
                                           tfds_name='coco/2017',
                                           tfds_split='validation',
                                           is_training=True,
                                           global_batch_size=2,
                                       ))
     with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
         task = detection.DetectionTask(config)
         model = task.build_model()
         dataset = task.build_inputs(config.train_data)
         iterator = iter(dataset)
         opt_cfg = optimization.OptimizationConfig({
             'optimizer': {
                 'type': 'detr_adamw',
                 'detr_adamw': {
                     'weight_decay_rate': 1e-4,
                     'global_clipnorm': 0.1,
                 }
             },
             'learning_rate': {
                 'type': 'stepwise',
                 'stepwise': {
                     'boundaries': [120000],
                     'values': [0.0001, 1.0e-05]
                 }
             },
         })
         optimizer = detection.DetectionTask.create_optimizer(opt_cfg)
         task.train_step(next(iterator), model, optimizer)
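
This test mocks TFDS, so no real COCO data is read. The `_as_dataset` helper passed to `tfds.testing.mock_data` is referenced but not shown above; what follows is a hypothetical sketch of such a mock, assuming dummy features shaped like the `coco/2017` schema. The helper in the actual test may differ.

import numpy as np
import tensorflow as tf

def _gen_fn():
    # Dummy example matching the coco/2017 feature layout; sizes are arbitrary.
    h, w, num_boxes = 128, 128, 5
    return {
        'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
        'image/id': 1,
        'image/filename': 'test',
        'objects': {
            'is_crowd': np.zeros(shape=(num_boxes,), dtype=bool),
            'bbox': np.random.uniform(size=(num_boxes, 4)).astype(np.float32),
            'label': np.ones(shape=(num_boxes,), dtype=np.int64),
            'id': np.arange(num_boxes, dtype=np.int64),
            'area': np.ones(shape=(num_boxes,), dtype=np.int64),
        }
    }

def _as_dataset(self, *args, **kwargs):
    # tfds.testing.mock_data substitutes this for DatasetBuilder._as_dataset;
    # `self` is the builder, so its feature spec drives the output signature.
    del args, kwargs
    return tf.data.Dataset.from_generator(
        lambda: (_gen_fn() for _ in range(10)),
        output_types=self.info.features.dtype,
        output_shapes=self.info.features.shape,
    )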
Example #2
def detr_coco() -> cfg.ExperimentConfig:
    """Config to get results that matches the paper."""
    train_batch_size = 64
    eval_batch_size = 64
    num_train_data = COCO_TRAIN_EXAMPLES
    num_steps_per_epoch = num_train_data // train_batch_size
    train_steps = 500 * num_steps_per_epoch  # 500 epochs
    decay_at = train_steps - 100 * num_steps_per_epoch  # 400 epochs
    config = cfg.ExperimentConfig(
        task=DetrTask(init_checkpoint='',
                      init_checkpoint_modules='backbone',
                      model=Detr(num_classes=81,
                                 input_size=[1333, 1333, 3],
                                 norm_activation=common.NormActivation()),
                      losses=Losses(),
                      train_data=coco.COCODataConfig(
                          tfds_name='coco/2017',
                          tfds_split='train',
                          is_training=True,
                          global_batch_size=train_batch_size,
                          shuffle_buffer_size=1000,
                      ),
                      validation_data=coco.COCODataConfig(
                          tfds_name='coco/2017',
                          tfds_split='validation',
                          is_training=False,
                          global_batch_size=eval_batch_size,
                          drop_remainder=False)),
        trainer=cfg.TrainerConfig(
            train_steps=train_steps,
            validation_steps=-1,
            steps_per_loop=10000,
            summary_interval=10000,
            checkpoint_interval=10000,
            validation_interval=10000,
            max_to_keep=1,
            best_checkpoint_export_subdir='best_ckpt',
            best_checkpoint_eval_metric='AP',
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'detr_adamw',
                    'detr_adamw': {
                        'weight_decay_rate': 1e-4,
                        'global_clipnorm': 0.1,
                        # Avoid AdamW legacy behavior.
                        'gradient_clip_norm': 0.0
                    }
                },
                'learning_rate': {
                    'type': 'stepwise',
                    'stepwise': {
                        'boundaries': [decay_at],
                        'values': [0.0001, 1.0e-05]
                    }
                },
            })),
        restrictions=[
            'task.train_data.is_training != None',
        ])
    return config
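
In the Model Garden, factory functions like this are normally registered with exp_factory and looked up by name. A hedged usage sketch, assuming this function is registered as 'detr_coco'; the overrides are illustrative, not part of the config above.

from official.core import exp_factory

config = exp_factory.get_exp_config('detr_coco')
# Shrink the run for a local smoke test; any config field can be overridden.
config.task.train_data.global_batch_size = 8
config.trainer.train_steps = 1000
config.validate()  # enforces the declared restrictions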
Example #3
 def test_train_step(self):
   config = detr_cfg.DetrTask(
       model=detr_cfg.Detr(
           input_size=[1333, 1333, 3],
           num_encoder_layers=1,
           num_decoder_layers=1,
           backbone=backbones.Backbone(
               type='resnet',
               resnet=backbones.ResNet(model_id=10, bn_trainable=False))
       ),
       losses=detr_cfg.Losses(class_offset=1),
       train_data=detr_cfg.DataConfig(
           tfds_name='coco/2017',
           tfds_split='validation',
           is_training=True,
           global_batch_size=2,
       ))
   with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
     task = detection.DetectionTask(config)
     model = task.build_model()
     dataset = task.build_inputs(config.train_data)
     iterator = iter(dataset)
     opt_cfg = optimization.OptimizationConfig({
         'optimizer': {
             'type': 'detr_adamw',
             'detr_adamw': {
                 'weight_decay_rate': 1e-4,
                 'global_clipnorm': 0.1,
             }
         },
         'learning_rate': {
             'type': 'stepwise',
             'stepwise': {
                 'boundaries': [120000],
                 'values': [0.0001, 1.0e-05]
             }
         },
     })
     optimizer = detection.DetectionTask.create_optimizer(opt_cfg)
     task.train_step(next(iterator), model, optimizer)
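
Same fixture as Example #1, but here the task config spells out the model: a one-layer encoder/decoder on a tiny ResNet-10 backbone keeps the test cheap. Scaling up is only a config change; an illustrative sketch with the same backbones module, where ResNet-50 is the paper's choice rather than anything this test uses:

backbone = backbones.Backbone(
    type='resnet',
    resnet=backbones.ResNet(model_id=50, bn_trainable=False))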
Example #4
def detr_coco_tfrecord() -> cfg.ExperimentConfig:
    """Config to get results that matches the paper."""
    train_batch_size = 64
    eval_batch_size = 64
    steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
    train_steps = 300 * steps_per_epoch  # 300 epochs
    decay_at = train_steps - 100 * steps_per_epoch  # 200 epochs
    config = cfg.ExperimentConfig(
        task=DetrTask(init_checkpoint='',
                      init_checkpoint_modules='backbone',
                      annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                                   'instances_val2017.json'),
                      model=Detr(input_size=[1333, 1333, 3],
                                 norm_activation=common.NormActivation()),
                      losses=Losses(),
                      train_data=DataConfig(
                          input_path=os.path.join(COCO_INPUT_PATH_BASE,
                                                  'train*'),
                          is_training=True,
                          global_batch_size=train_batch_size,
                          shuffle_buffer_size=1000,
                      ),
                      validation_data=DataConfig(
                          input_path=os.path.join(COCO_INPUT_PATH_BASE,
                                                  'val*'),
                          is_training=False,
                          global_batch_size=eval_batch_size,
                          drop_remainder=False,
                      )),
        trainer=cfg.TrainerConfig(
            train_steps=train_steps,
            validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            validation_interval=5 * steps_per_epoch,
            max_to_keep=1,
            best_checkpoint_export_subdir='best_ckpt',
            best_checkpoint_eval_metric='AP',
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'detr_adamw',
                    'detr_adamw': {
                        'weight_decay_rate': 1e-4,
                        'global_clipnorm': 0.1,
                        # Avoid AdamW legacy behavior.
                        'gradient_clip_norm': 0.0
                    }
                },
                'learning_rate': {
                    'type': 'stepwise',
                    'stepwise': {
                        'boundaries': [decay_at],
                        'values': [0.0001, 1.0e-05]
                    }
                },
            })),
        restrictions=[
            'task.train_data.is_training != None',
        ])
    return config
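
For reference, the derived schedule works out as below, under the assumption that COCO_TRAIN_EXAMPLES is the COCO 2017 train split size of 118,287 images.

COCO_TRAIN_EXAMPLES = 118287  # assumed COCO 2017 train split size
train_batch_size = 64

steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size  # 1848
train_steps = 300 * steps_per_epoch                        # 554400
decay_at = train_steps - 100 * steps_per_epoch             # 369600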