Example no. 1
0
 def test_proportional_sample_distribution(self):
   """Proportional sampler yields the same cumulative distribution each step."""
   sampling_config = configs.TaskSamplingConfig(
       type='proportional',
       proportional=configs.ProportionalSampleConfig(alpha=2.0))
   task_sampler = sampler.get_task_sampler(sampling_config,
                                           self._task_weights)
   # CumulativeOf(Normalize([1.0^2, 2.0^2, 3.0^2])) = [1/14, 5/14, 14/14].
   expected = [0.07142857, 0.35714286, 1.0]
   for global_step in range(5):
     distribution = task_sampler.task_cumulative_distribution(
         tf.constant(global_step, dtype=tf.int64))
     self.assertAllClose(expected, distribution.numpy())
Example no. 2
0
  def test_annealing_sample_distribution(self):
    """Annealing sampler keeps the distribution fixed within each epoch."""
    num_epoch = 3
    step_per_epoch = 6
    annealing_config = configs.TaskSamplingConfig(
        type='annealing',
        annealing=configs.AnnealingSampleConfig(
            steps_per_epoch=step_per_epoch,
            total_steps=step_per_epoch * num_epoch))
    annealing_sampler = sampler.get_task_sampler(annealing_config,
                                                 self._task_weights)

    global_step = tf.Variable(
        0, dtype=tf.int64, name='global_step', trainable=False)
    # One expected cumulative distribution per epoch; constant within an
    # epoch, annealing across epochs.
    expected_cumulative_epochs = [[0.12056106, 0.4387236, 1.0],
                                  [0.16666667, 0.5, 1.0],
                                  [0.22477472, 0.5654695, 1.0]]
    for expected in expected_cumulative_epochs:
      for _ in range(step_per_epoch):
        distribution = annealing_sampler.task_cumulative_distribution(
            tf.constant(global_step, dtype=tf.int64))
        global_step.assign_add(1)
        self.assertAllClose(expected, distribution.numpy())
Example no. 3
0
def multitask_vision() -> multi_cfg.MultiTaskExperimentConfig:
    """Builds a multi-task vision experiment: one backbone, three heads.

    A single HardNet backbone is shared by three heads — a semantic
    segmenter, an image classifier, and a YOLO detector — each declared as
    its own task routine with weight 1.0.

    TODO: use the same num_classes / input_size in both the task and the
    model definitions instead of repeating the literals.

    multi_cfg.MultiTaskConfig:
      - Retains each task_name, the entire task config, eval_steps and
        weights.
        - The task config is used by the respective multitask trainers for
          train_step.
        - Weights are used by the task_sampler.

    multi_cfg.MultiTaskTrainerConfig:
      - trainer_type and task_sampler configure task sampling in train_lib.
      - The usual multi_cfg.TrainerConfig params are used directly in
        train_lib.

    Returns:
      A multi_cfg.MultiTaskExperimentConfig wiring the three task routines,
      the shared multi-head model and the trainer/optimizer settings.
    """
    # Input paths are intentionally left empty; fill them in per deployment.
    input_path_segmentation = ''
    input_path_classification = ''
    input_path_yolo = ''
    # Sums over the three tasks — presumably per-task dataset sizes at
    # batch size 1. TODO(review): confirm against the actual datasets.
    steps_per_epoch = 6915 + 2486 + 600
    train_batch_size = 1
    eval_batch_size = 1
    validation_steps = 1021 + 621 + 600

    # Segmentation: 19 classes, 256x256 input, randaug on the train split.
    segmentation_routine = multi_cfg.TaskRoutine(
        task_name='segmentation',
        task_config=SemanticSegmentationSubtask(
            model=SemanticSegmentationModelSpecs(num_classes=19,
                                                 input_size=[256, 256, 3]),
            losses=SegmentationLosses(ignore_label=250,
                                      top_k_percent_pixels=0.3),
            train_data=SegmentationDataConfig(
                output_size=[256, 256],
                input_path=input_path_segmentation,
                global_batch_size=train_batch_size,
                is_training=True,
                aug_scale_min=0.5,
                aug_scale_max=2.0,
                preserve_aspect_ratio=False,
                aug_policy='randaug',
                randaug_magnitude=5,
                randaug_available_ops=[
                    'AutoContrast', 'Equalize', 'Invert', 'Rotate',
                    'Posterize', 'Solarize', 'Color', 'Contrast', 'Brightness',
                    'Sharpness', 'Cutout', 'SolarizeAdd'
                ]),
            validation_data=SegmentationDataConfig(
                output_size=[256, 256],
                input_path=input_path_segmentation,
                global_batch_size=eval_batch_size,
                is_training=False,
                resize_eval_groundtruth=True,
                drop_remainder=False)),
        eval_steps=603,  # TODO: confirm where eval_steps is consumed.
        task_weight=1.0)
    # Classification: 4 classes, label smoothing, randaug on the train split.
    classification_routine = multi_cfg.TaskRoutine(
        task_name='classification',
        task_config=ImageClassificationSubtask(
            model=ImageClassificationModelSpecs(num_classes=4,
                                                input_size=[256, 256, 3]),
            losses=ClassificationLosses(label_smoothing=0.1),
            train_data=ClassificationDataConfig(
                input_path=input_path_classification,
                is_training=True,
                global_batch_size=train_batch_size,
                aug_policy='randaug',
                randaug_magnitude=5),
            validation_data=ClassificationDataConfig(
                input_path=input_path_classification,
                is_training=False,
                global_batch_size=eval_batch_size,
                drop_remainder=False)),
        eval_steps=621,  # TODO: confirm where eval_steps is consumed.
        task_weight=1.0)
    # Detection: YOLO head with 3 anchors per scale over 3 strides; the
    # anchors list is flat (w, h) pairs, one pair per anchor.
    yolo_routine = multi_cfg.TaskRoutine(
        task_name='yolo',
        task_config=YoloSubtask(
            model=YoloModelSpecs(num_classes=4,
                                 input_size=[256, 256, 3],
                                 head=YoloHead(anchor_per_scale=3,
                                               strides=[16, 32, 64],
                                               anchors=[
                                                   12, 16, 19, 36, 40, 28, 36,
                                                   75, 76, 55, 72, 146, 142,
                                                   110, 192, 243, 459, 401
                                               ],
                                               xy_scale=[1.2, 1.1, 1.05])),
            losses=YoloLosses(l2_weight_decay=1e-4, iou_loss_thres=0.5),
            train_data=YoloDataConfig(input_path=input_path_yolo,
                                      is_training=True,
                                      global_batch_size=train_batch_size,
                                      aug_policy='randaug',
                                      randaug_magnitude=5),
            validation_data=YoloDataConfig(input_path=input_path_yolo,
                                           is_training=False,
                                           global_batch_size=eval_batch_size,
                                           drop_remainder=False)),
        eval_steps=600,  # TODO: confirm where eval_steps is consumed.
        task_weight=1.0)

    # Shared model: one HardNet-70 backbone feeding the three task heads.
    # Head num_classes values mirror the task configs above (see TODO in
    # the docstring about de-duplicating them).
    model_config = MultiHeadModel(
        input_size=[256, 256, 3],
        backbone=backbones.Backbone(type='hardnet',
                                    hardnet=backbones.HardNet(model_id=70)),
        norm_activation=common.NormActivation(activation='relu',
                                              norm_momentum=0.9997,
                                              norm_epsilon=0.001,
                                              use_sync_bn=True),
        heads=[
            Submodel(
                name='classification',
                num_classes=4,
                head=ImageClassificationHead(
                    level=0,  # decoder is identity function
                    num_convs=2,
                    num_filters=256,
                    add_head_batch_norm=False,
                    dropout_rate=0.2)),
            Submodel(name='segmentation',
                     num_classes=19,
                     decoder=decoders.Decoder(
                         type='hardnet',
                         hardnet=decoders.HardNet(model_id=70)),
                     head=SegmentationHead(level=0,
                                           num_convs=0,
                                           feature_fusion=None,
                                           low_level=0,
                                           low_level_num_filters=0)),
            Submodel(name='yolo',
                     num_classes=4,
                     decoder=decoders.Decoder(type='pan',
                                              pan=decoders.PAN(levels=3)),
                     head=YoloHead(anchor_per_scale=3,
                                   strides=[16, 32, 64],
                                   anchors=[
                                       12, 16, 19, 36, 40, 28, 36, 75, 76, 55,
                                       72, 146, 142, 110, 192, 243, 459, 401
                                   ],
                                   xy_scale=[1.2, 1.1, 1.05]))
        ],
        l2_weight_decay=1e-4)

    # Trainer: proportional task sampling, 45-epoch polynomial LR decay with
    # a 5-epoch linear warmup; evaluation/checkpointing once per epoch.
    return multi_cfg.MultiTaskExperimentConfig(
        task=multi_cfg.MultiTaskConfig(model=model_config,
                                       init_checkpoint=None,
                                       task_routines=(segmentation_routine,
                                                      classification_routine,
                                                      yolo_routine)),
        trainer=multi_cfg.MultiTaskTrainerConfig(
            trainer_type="interleaving",
            task_sampler=multi_cfg.TaskSamplingConfig(
                type="proportional",
                proportional=multi_cfg.ProportionalSampleConfig(
                    alpha=1.0)),  # uniform, proportional or annealing
            steps_per_loop=steps_per_epoch,
            summary_interval=steps_per_epoch,
            checkpoint_interval=steps_per_epoch,
            train_steps=45 * steps_per_epoch,
            validation_steps=validation_steps,
            validation_interval=steps_per_epoch,
            best_checkpoint_eval_metric='mean_iou',
            continuous_eval_timeout=3600,
            max_to_keep=5,
            optimizer_config=optimization.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd',
                    'sgd': {
                        'momentum': 0.9
                    }
                },
                'learning_rate': {
                    'type': 'polynomial',
                    'polynomial': {
                        'initial_learning_rate': 0.007,
                        'decay_steps': 45 * steps_per_epoch,
                        'end_learning_rate': 0.0,
                        'power': 0.9
                    }
                },
                'warmup': {
                    'type': 'linear',
                    'linear': {
                        'warmup_steps': 5 * steps_per_epoch,
                        'warmup_learning_rate': 0
                    }
                }
            })))