def test_trainer_with_configs(self, distribution):
  config = configs.MultiTaskConfig(task_routines=(
      configs.TaskRoutine(task_name="foo",
                          task_config=test_utils.FooConfig(),
                          task_weight=3.0),
      configs.TaskRoutine(task_name="bar",
                          task_config=test_utils.BarConfig(),
                          task_weight=1.0)))
  with distribution.scope():
    test_multitask = multitask.MultiTask.from_config(config)
  test_optimizer = tf.keras.optimizers.SGD(0.1)
  model = test_utils.MockMultiTaskModel()
  num_step = 1000
  sampler = task_sampler.AnnealingTaskSampler(
      task_weights=test_multitask.task_weights,
      steps_per_epoch=num_step // 5,
      total_steps=num_step)
  test_trainer = interleaving_trainer.MultiTaskInterleavingTrainer(
      multi_task=test_multitask,
      multi_task_model=model,
      optimizer=test_optimizer,
      task_sampler=sampler)
  results = test_trainer.train(
      tf.convert_to_tensor(num_step, dtype=tf.int32))
  self.assertContainsSubset(["training_loss", "bar_acc"],
                            results["bar"].keys())
  self.assertContainsSubset(["training_loss", "foo_acc"],
                            results["foo"].keys())
  self.assertEqual(test_trainer.global_step.numpy(), num_step)
  bar_sampled_step = test_trainer.task_step_counter("bar").numpy()
  foo_sampled_step = test_trainer.task_step_counter("foo").numpy()
  self.assertEqual(bar_sampled_step + foo_sampled_step, num_step)
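The `distribution` argument in the test above is normally injected by a parameterized decorator. A minimal sketch of that wiring, assuming TensorFlow's internal distribute test combinations (the helper name `all_strategy_combinations` is illustrative):

# Sketch only: `combinations` and `strategy_combinations` are TensorFlow's
# internal distribute test utilities commonly used in model garden tests.
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations


def all_strategy_combinations():
  # Each entry yields one parameterized run of the test, with `distribution`
  # bound to the corresponding tf.distribute strategy.
  return combinations.combine(
      distribution=[
          strategy_combinations.default_strategy,
          strategy_combinations.one_device_strategy_gpu,
      ])

# The test method is then decorated with:
#   @combinations.generate(all_strategy_combinations())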
 def test_trainer_with_configs(self):
   config = configs.MultiTaskConfig(
       task_routines=(configs.TaskRoutine(
           task_name="foo",
           task_config=test_utils.FooConfig(),
           task_weight=0.5),
                      configs.TaskRoutine(
                          task_name="bar",
                          task_config=test_utils.BarConfig(),
                          task_weight=0.5)))
   test_multitask = multitask.MultiTask.from_config(config)
   test_optimizer = tf.keras.optimizers.SGD(0.1)
   model = test_utils.MockMultiTaskModel()
   test_trainer = base_trainer.MultiTaskBaseTrainer(
       multi_task=test_multitask,
       multi_task_model=model,
       optimizer=test_optimizer)
   results = test_trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
   self.assertContainsSubset(["training_loss", "bar_acc"],
                             results["bar"].keys())
   self.assertContainsSubset(["training_loss", "foo_acc"],
                             results["foo"].keys())
   self.assertEqual(test_multitask.task_weight("foo"), 0.5)
   self.assertEqual(test_trainer.global_step.numpy(), 5)
   self.assertIn("learning_rate", results)
Example #3
def multitask_simclr() -> multitask_configs.MultiTaskExperimentConfig:
    return multitask_configs.MultiTaskExperimentConfig(
        task=multitask_configs.MultiTaskConfig(
            model=SimCLRMTModelConfig(
                heads=(SimCLRMTHeadConfig(mode=simclr_model.PRETRAIN),
                       SimCLRMTHeadConfig(mode=simclr_model.FINETUNE))),
            task_routines=(multitask_configs.TaskRoutine(
                task_name=simclr_model.PRETRAIN,
                task_config=simclr_configs.SimCLRPretrainTask(),
                task_weight=2.0),
                           multitask_configs.TaskRoutine(
                               task_name=simclr_model.FINETUNE,
                               task_config=simclr_configs.SimCLRFinetuneTask(),
                               task_weight=1.0))),
        trainer=multitask_configs.MultiTaskTrainerConfig())
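To make a config factory like this discoverable by name, the model garden convention is to register it; a hedged sketch, assuming `official.core.exp_factory` (the experiment name string is illustrative):

# Sketch: registers the factory so the config can later be looked up via
# exp_factory.get_exp_config('multitask_simclr'); in the model garden this is
# usually written as a decorator directly above the function definition.
from official.core import exp_factory

exp_factory.register_config_factory('multitask_simclr')(multitask_simclr)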
 def test_end_to_end(self, distribution_strategy, flag_mode):
   model_dir = self.get_temp_dir()
   experiment_config = configs.MultiTaskExperimentConfig(
       task=configs.MultiTaskConfig(
           task_routines=(
               configs.TaskRoutine(
                   task_name='foo', task_config=test_utils.FooConfig()),
               configs.TaskRoutine(
                   task_name='bar', task_config=test_utils.BarConfig()))))
   experiment_config = params_dict.override_params_dict(
       experiment_config, self._test_config, is_strict=False)
   with distribution_strategy.scope():
     test_multitask = multitask.MultiTask.from_config(experiment_config.task)
     model = test_utils.MockMultiTaskModel()
   train_lib.run_experiment(
       distribution_strategy=distribution_strategy,
       task=test_multitask,
       model=model,
       mode=flag_mode,
       params=experiment_config,
       model_dir=model_dir)
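The `self._test_config` override merged in above comes from the test fixture; a hypothetical minimal version defined in setUp, with all step counts illustrative, that shrinks the run so it finishes quickly:

  def setUp(self):
    super().setUp()
    # Hypothetical override; keys mirror the trainer section of the experiment
    # config and are merged by params_dict.override_params_dict.
    self._test_config = {
        'trainer': {
            'checkpoint_interval': 10,
            'steps_per_loop': 10,
            'summary_interval': 10,
            'train_steps': 10,
            'validation_steps': 5,
            'validation_interval': 10,
        },
    }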
 def test_end_to_end_multi_eval(self, distribution_strategy, flag_mode):
   model_dir = self.get_temp_dir()
   experiment_config = configs.MultiEvalExperimentConfig(
       task=test_utils.FooConfig(),
       eval_tasks=configs.MultiTaskConfig(
           task_routines=(
               configs.TaskRoutine(
                   task_name='foo',
                   task_config=test_utils.FooConfig()),
               configs.TaskRoutine(
                   task_name='bar', task_config=test_utils.BarConfig()))))
   experiment_config = params_dict.override_params_dict(
       experiment_config, self._test_config, is_strict=False)
   with distribution_strategy.scope():
     train_task = task_factory.get_task(experiment_config.task)
     eval_tasks = multitask.MultiTask.from_config(experiment_config.eval_tasks)
   train_lib.run_experiment_with_multitask_eval(
       distribution_strategy=distribution_strategy,
       train_task=train_task,
       eval_tasks=eval_tasks,
       mode=flag_mode,
       params=experiment_config,
       model_dir=model_dir)
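`task_factory.get_task` above resolves a task config to its Task class through a registry; a sketch of how a config such as `test_utils.FooConfig` is typically bound, assuming the model garden's registration decorator (the class body here is a stand-in):

# Sketch: the decorator maps the config class to the task class, so
# task_factory.get_task(test_utils.FooConfig()) can instantiate FooTask.
from official.core import base_task
from official.core import task_factory


@task_factory.register_task_cls(test_utils.FooConfig)
class FooTask(base_task.Task):
  """Illustrative stand-in for the task class bound to FooConfig."""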
Example #6
def multitask_vision() -> multi_cfg.MultiTaskExperimentConfig:
    """
  Vision task with single backbone and multiple heads.
  Each head can be a segmenter, detector or classifier.
  TODO: use same num_class and input_size in both task and model definition

  multi_cfg.MultiTaskConfig:
    - Retains each task_name, entire task, eval_steps and weights,
        - Entire_task used in respective multitask trainers for train_step
        - Weights used in task_sampler
  
  multi_cfg.MultiTaskTrainerConfig:
    - trainer_type and task_sampler used to configure task sampling in train_lib
    - Normal multi_cfg.TrainerConfig params used directly in train_lib
  """
    input_path_segmentation = ''
    input_path_classification = ''
    input_path_yolo = ''
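    # Assumed dataset sizes: with a global batch size of 1, one epoch visits
    # every example of all three tasks, so per-task step counts are summed.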
    steps_per_epoch = 6915 + 2486 + 600
    train_batch_size = 1
    eval_batch_size = 1
    validation_steps = 1021 + 621 + 600

    segmentation_routine = multi_cfg.TaskRoutine(
        task_name='segmentation',
        task_config=SemanticSegmentationSubtask(
            model=SemanticSegmentationModelSpecs(num_classes=19,
                                                 input_size=[256, 256, 3]),
            losses=SegmentationLosses(ignore_label=250,
                                      top_k_percent_pixels=0.3),
            train_data=SegmentationDataConfig(
                output_size=[256, 256],
                input_path=input_path_segmentation,
                global_batch_size=train_batch_size,
                is_training=True,
                aug_scale_min=0.5,
                aug_scale_max=2.0,
                preserve_aspect_ratio=False,
                aug_policy='randaug',
                randaug_magnitude=5,
                randaug_available_ops=[
                    'AutoContrast', 'Equalize', 'Invert', 'Rotate',
                    'Posterize', 'Solarize', 'Color', 'Contrast', 'Brightness',
                    'Sharpness', 'Cutout', 'SolarizeAdd'
                ]),
            validation_data=SegmentationDataConfig(
                output_size=[256, 256],
                input_path=input_path_segmentation,
                global_batch_size=eval_batch_size,
                is_training=False,
                resize_eval_groundtruth=True,
                drop_remainder=False)),
        eval_steps=603,  # TODO: check where eval_steps is used
        task_weight=1.0)
    classification_routine = multi_cfg.TaskRoutine(
        task_name='classification',
        task_config=ImageClassificationSubtask(
            model=ImageClassificationModelSpecs(num_classes=4,
                                                input_size=[256, 256, 3]),
            losses=ClassificationLosses(label_smoothing=0.1),
            train_data=ClassificationDataConfig(
                input_path=input_path_classification,
                is_training=True,
                global_batch_size=train_batch_size,
                aug_policy='randaug',
                randaug_magnitude=5),
            validation_data=ClassificationDataConfig(
                input_path=input_path_classification,
                is_training=False,
                global_batch_size=eval_batch_size,
                drop_remainder=False)),
        eval_steps=621,  # TODO: check where eval_steps is used
        task_weight=1.0)
    yolo_routine = multi_cfg.TaskRoutine(
        task_name='yolo',
        task_config=YoloSubtask(
            model=YoloModelSpecs(num_classes=4,
                                 input_size=[256, 256, 3],
                                 head=YoloHead(anchor_per_scale=3,
                                               strides=[16, 32, 64],
                                               anchors=[
                                                   12, 16, 19, 36, 40, 28, 36,
                                                   75, 76, 55, 72, 146, 142,
                                                   110, 192, 243, 459, 401
                                               ],
                                               xy_scale=[1.2, 1.1, 1.05])),
            losses=YoloLosses(l2_weight_decay=1e-4, iou_loss_thres=0.5),
            train_data=YoloDataConfig(input_path=input_path_yolo,
                                      is_training=True,
                                      global_batch_size=train_batch_size,
                                      aug_policy='randaug',
                                      randaug_magnitude=5),
            validation_data=YoloDataConfig(input_path=input_path_yolo,
                                           is_training=False,
                                           global_batch_size=eval_batch_size,
                                           drop_remainder=False)),
        eval_steps=600,  # TODO: check where eval_steps is used
        task_weight=1.0)

    model_config = MultiHeadModel(
        input_size=[256, 256, 3],
        backbone=backbones.Backbone(type='hardnet',
                                    hardnet=backbones.HardNet(model_id=70)),
        norm_activation=common.NormActivation(activation='relu',
                                              norm_momentum=0.9997,
                                              norm_epsilon=0.001,
                                              use_sync_bn=True),
        heads=[
            Submodel(
                name='classification',
                num_classes=4,
                head=ImageClassificationHead(
                    level=0,  # decoder is identity function
                    num_convs=2,
                    num_filters=256,
                    add_head_batch_norm=False,
                    dropout_rate=0.2)),
            Submodel(name='segmentation',
                     num_classes=19,
                     decoder=decoders.Decoder(
                         type='hardnet',
                         hardnet=decoders.HardNet(model_id=70)),
                     head=SegmentationHead(level=0,
                                           num_convs=0,
                                           feature_fusion=None,
                                           low_level=0,
                                           low_level_num_filters=0)),
            Submodel(name='yolo',
                     num_classes=4,
                     decoder=decoders.Decoder(type='pan',
                                              pan=decoders.PAN(levels=3)),
                     head=YoloHead(anchor_per_scale=3,
                                   strides=[16, 32, 64],
                                   anchors=[
                                       12, 16, 19, 36, 40, 28, 36, 75, 76, 55,
                                       72, 146, 142, 110, 192, 243, 459, 401
                                   ],
                                   xy_scale=[1.2, 1.1, 1.05]))
        ],
        l2_weight_decay=1e-4)

    return multi_cfg.MultiTaskExperimentConfig(
        task=multi_cfg.MultiTaskConfig(model=model_config,
                                       init_checkpoint=None,
                                       task_routines=(segmentation_routine,
                                                      classification_routine,
                                                      yolo_routine)),
        trainer=multi_cfg.MultiTaskTrainerConfig(
            trainer_type="interleaving",
            task_sampler=multi_cfg.TaskSamplingConfig(
                type="proportional",
                proportional=multi_cfg.ProportionalSampleConfig(
                    alpha=1.0)),  # uniform, proportional or annealing
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            train_steps=45 * steps_per_epoch,
            validation_steps=validation_steps,
            validation_interval=steps_per_epoch,
            best_checkpoint_eval_metric='mean_iou',
            continuous_eval_timeout=3600,
            max_to_keep=5,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd',
                    'sgd': {
                        'momentum': 0.9
                    }
                },
                'learning_rate': {
                    'type': 'polynomial',
                    'polynomial': {
                        'initial_learning_rate': 0.007,
                        'decay_steps': 45 * steps_per_epoch,
                        'end_learning_rate': 0.0,
                        'power': 0.9
                    }
                },
                'warmup': {
                    'type': 'linear',
                    'linear': {
                        'warmup_steps': 5 * steps_per_epoch,
                        'warmup_learning_rate': 0
                    }
                }
            })))
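A hypothetical driver for this experiment, modeled on the `run_experiment` call in the end-to-end test above; the strategy, mode, and model_dir are illustrative, and `build_multihead_model` is a hypothetical helper standing in for building the multi-head Keras model from `params.task.model`:

# Illustrative only: wires the config above into the multitask training loop.
params = multitask_vision()
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  tasks = multitask.MultiTask.from_config(params.task)
  model = build_multihead_model(params.task.model)  # hypothetical builder
train_lib.run_experiment(
    distribution_strategy=strategy,
    task=tasks,
    model=model,
    mode='train_and_eval',
    params=params,
    model_dir='/tmp/multitask_vision')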