Example #1
0
 def test_raise_error_on_empty_config(self):
     """A Loss proto without a classification loss must be rejected."""
     proto_text = """
   localization_loss {
     weighted_l2 {
     }
   }
 """
     loss_proto = losses_pb2.Loss()
     text_format.Merge(proto_text, loss_proto)
     # The builder must refuse an incomplete loss configuration.
     with self.assertRaises(ValueError):
         losses_builder.build(loss_proto)
Example #2
0
 def test_build_hard_example_miner_with_non_default_values(self):
     """Hard-example-miner config values must propagate to the built miner."""
     losses_text_proto = """
   localization_loss {
     weighted_l2 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
   hard_example_miner {
     num_hard_examples: 32
     iou_threshold: 0.5
     loss_type: LOCALIZATION
     max_negatives_per_positive: 10
     min_negatives_per_image: 3
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, _, _, _, hard_example_miner, _, _ = losses_builder.build(
         losses_proto)
     # assertIsInstance gives a clearer failure message than
     # assertTrue(isinstance(...)).
     self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
     self.assertEqual(hard_example_miner._num_hard_examples, 32)
     self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
     self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
     self.assertEqual(hard_example_miner._min_negatives_per_image, 3)
Example #3
0
 def test_build_reweighting_unmatched_anchors(self):
     """Loss classes and weights must match the proto configuration."""
     losses_text_proto = """
   localization_loss {
     weighted_l2 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
   hard_example_miner {
   }
   classification_weight: 0.8
   localization_weight: 0.2
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     (classification_loss, localization_loss, classification_weight,
      localization_weight, hard_example_miner, _,
      _) = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure, unlike
     # assertTrue(isinstance(...)).
     self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
     self.assertIsInstance(classification_loss,
                           losses.WeightedSoftmaxClassificationLoss)
     self.assertIsInstance(localization_loss,
                           losses.WeightedL2LocalizationLoss)
     self.assertAlmostEqual(classification_weight, 0.8)
     self.assertAlmostEqual(localization_weight, 0.2)
def _build_ssd_model(ssd_config, is_training, add_summaries):
    """Builds an SSD detection model based on the model config.

    Args:
      ssd_config: A ssd.proto object containing the config for the desired
        SSDMetaArch.
      is_training: True if this model is being built for training purposes.
      add_summaries: Whether to add tf summaries in the model.

    Returns:
      SSDMetaArch based on the config.

    Raises:
      ValueError: If ssd_config.type is not recognized (i.e. not registered
        in model_class_map).
    """
    num_classes = ssd_config.num_classes

    # Assemble each component of the meta-architecture from its dedicated
    # sub-builder.
    extractor = _build_ssd_feature_extractor(ssd_config.feature_extractor,
                                             is_training)
    coder = box_coder_builder.build(ssd_config.box_coder)
    box_matcher = matcher_builder.build(ssd_config.matcher)
    similarity_calc = sim_calc.build(ssd_config.similarity_calculator)
    predictor = box_predictor_builder.build(hyperparams_builder.build,
                                            ssd_config.box_predictor,
                                            is_training, num_classes)
    anchors = anchor_generator_builder.build(ssd_config.anchor_generator)
    resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    nms_fn, score_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (cls_loss, loc_loss, cls_weight, loc_weight,
     miner) = losses_builder.build(ssd_config.loss)

    # NOTE: positional argument order below must match the SSDMetaArch
    # constructor signature.
    return ssd_meta_arch.SSDMetaArch(
        is_training,
        anchors,
        predictor,
        coder,
        extractor,
        box_matcher,
        similarity_calc,
        ssd_config.encode_background_as_zeros,
        resizer_fn,
        nms_fn,
        score_fn,
        cls_loss,
        loc_loss,
        cls_weight,
        loc_weight,
        ssd_config.normalize_loss_by_num_matches,
        miner,
        add_summaries=add_summaries)
Example #5
0
 def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):
     """Focal loss combined with a hard-example miner must be rejected."""
     proto_text = """
   localization_loss {
     weighted_l2 {
     }
   }
   classification_loss {
     weighted_sigmoid_focal {
     }
   }
   hard_example_miner {
   }
   classification_weight: 0.8
   localization_weight: 0.2
 """
     loss_proto = losses_pb2.Loss()
     text_format.Merge(proto_text, loss_proto)
     # Focal loss already downweights easy examples, so pairing it with a
     # hard-example miner is a configuration error.
     with self.assertRaises(ValueError):
         losses_builder.build(loss_proto)
Example #6
0
 def test_do_not_build_hard_example_miner_by_default(self):
     """Without a hard_example_miner field the builder returns None for it."""
     losses_text_proto = """
   localization_loss {
     weighted_l2 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
     # assertIsNone checks identity and reports failures more clearly than
     # assertEqual(x, None).
     self.assertIsNone(hard_example_miner)
Example #7
0
 def test_build_weighted_iou_localization_loss(self):
     """A weighted_iou config must yield a WeightedIOULocalizationLoss."""
     losses_text_proto = """
   localization_loss {
     weighted_iou {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(localization_loss,
                           losses.WeightedIOULocalizationLoss)
Example #8
0
 def test_build_weighted_sigmoid_classification_loss(self):
     """A weighted_sigmoid config must yield a sigmoid classification loss."""
     losses_text_proto = """
   classification_loss {
     weighted_sigmoid {
     }
   }
   localization_loss {
     weighted_l2 {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(classification_loss,
                           losses.WeightedSigmoidClassificationLoss)
 def test_build_weighted_smooth_l1_localization_loss_default_delta(self):
     """The smooth-L1 localization loss must default delta to 1.0."""
     losses_text_proto = """
   localization_loss {
     weighted_smooth_l1 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(localization_loss,
                           losses.WeightedSmoothL1LocalizationLoss)
     self.assertAlmostEqual(localization_loss._delta, 1.0)
Example #10
0
 def test_build_weighted_logits_softmax_classification_loss(self):
     """weighted_logits_softmax must yield the against-logits softmax loss."""
     losses_text_proto = """
   classification_loss {
     weighted_logits_softmax {
     }
   }
   localization_loss {
     weighted_l2 {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     classification_loss, _, _, _, _, _, _ = losses_builder.build(
         losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(
         classification_loss,
         losses.WeightedSoftmaxClassificationAgainstLogitsLoss)
Example #11
0
 def test_build_weighted_sigmoid_focal_classification_loss(self):
     """Focal loss defaults: alpha unset (None) and gamma 2.0."""
     losses_text_proto = """
   classification_loss {
     weighted_sigmoid_focal {
     }
   }
   localization_loss {
     weighted_l2 {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(classification_loss,
                           losses.SigmoidFocalClassificationLoss)
     # assertIsNone is the correct check for None; assertAlmostEqual only
     # passed here via the None == None short-circuit.
     self.assertIsNone(classification_loss._alpha)
     self.assertAlmostEqual(classification_loss._gamma, 2.0)
Example #12
0
 def test_build_hard_example_miner_for_localization_loss(self):
     """loss_type LOCALIZATION must map to the internal 'loc' loss type."""
     losses_text_proto = """
   localization_loss {
     weighted_l2 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
   hard_example_miner {
     loss_type: LOCALIZATION
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
     self.assertEqual(hard_example_miner._loss_type, 'loc')
def _build_deeplab_model(model_config, is_training, add_summaries,
                         ignore_class):
    """Builds a DeepLab segmentation model from the model config.

    Args:
      model_config: Config object with num_classes, loss, scale_predictions,
        train_reduce and feature_extractor fields.
      is_training: True if the model is being built for training.
      add_summaries: Whether to add tf summaries in the model.
      ignore_class: Class id forwarded to the loss builder.

    Returns:
      A tuple (num_classes, DeeplabArchitecture instance).

    Raises:
      ValueError: If model_config.num_classes is 0.
    """
    num_classes = model_config.num_classes
    if not num_classes:
        raise ValueError('"num_classes" must be greater than 0.')

    classification_loss = losses_builder.build(model_config.loss,
                                               ignore_class)

    model = deeplab_architecture.DeeplabArchitecture(
        is_training=is_training,
        num_classes=num_classes,
        classification_loss=classification_loss,
        add_summaries=add_summaries,
        scale_pred=model_config.scale_predictions,
        main_loss_weight=1.0,
        train_reduce=model_config.train_reduce,
        feature_extractor=model_config.feature_extractor.type,
    )
    return num_classes, model
Example #14
0
 def test_anchorwise_output(self):
     """Smooth-L1 localization loss keeps per-anchor shape [batch, anchors]."""
     losses_text_proto = """
   localization_loss {
     weighted_smooth_l1 {
     }
   }
   classification_loss {
     weighted_softmax {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(localization_loss,
                           losses.WeightedSmoothL1LocalizationLoss)
     predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0,
                                                        1.0]]])
     targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
     weights = tf.constant([[1.0, 1.0]])
     loss = localization_loss(predictions, targets, weights=weights)
     self.assertEqual(loss.shape, [1, 2])
Example #15
0
 def test_anchorwise_output(self):
     """Sigmoid classification loss keeps per-anchor shape [batch, anchors]."""
     losses_text_proto = """
   classification_loss {
     weighted_sigmoid {
       anchorwise_output: true
     }
   }
   localization_loss {
     weighted_l2 {
     }
   }
 """
     losses_proto = losses_pb2.Loss()
     text_format.Merge(losses_text_proto, losses_proto)
     classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
     # assertIsInstance reports the actual type on failure.
     self.assertIsInstance(classification_loss,
                           losses.WeightedSigmoidClassificationLoss)
     predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
     targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
     weights = tf.constant([[1.0, 1.0]])
     loss = classification_loss(predictions, targets, weights=weights)
     self.assertEqual(loss.shape, [1, 2])
def _build_pspnet_icnet_model(model_config,
                              is_training,
                              add_summaries,
                              build_baseline_psp=False):
    num_classes = model_config.num_classes
    if not num_classes:
        raise ValueError('"num_classes" must be greater than 0.')

    in_filter_scale = model_config.filter_scale
    if in_filter_scale > 1 or in_filter_scale < 0:
        raise ValueError('"filter_scale" must be in the range (0,1].')
    filter_scale = 1.0 / in_filter_scale

    should_downsample_extractor = False
    if not build_baseline_psp:
        pretrain_single_branch_mode = model_config.pretrain_single_branch_mode
        should_downsample_extractor = not pretrain_single_branch_mode

    feature_extractor = _build_pspnet_icnet_extractor(
        model_config.feature_extractor,
        filter_scale,
        is_training,
        mid_downsample=should_downsample_extractor)

    model_arg_scope = hyperparams_builder.build(model_config.hyperparams,
                                                is_training)

    loss_config = model_config.loss
    classification_loss = (losses_builder.build(loss_config))
    use_aux_loss = loss_config.use_auxiliary_loss

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'model_arg_scope': model_arg_scope,
        'num_classes': num_classes,
        'feature_extractor': feature_extractor,
        'classification_loss': classification_loss,
        'use_aux_loss': use_aux_loss,
        'add_summaries': add_summaries
    }

    if not build_baseline_psp:
        if use_aux_loss:
            common_kwargs['main_loss_weight'] = (
                model_config.main_branch_loss_weight)
            common_kwargs['second_branch_loss_weight'] = (
                model_config.second_branch_loss_weight)
            common_kwargs['first_branch_loss_weight'] = (
                model_config.first_branch_loss_weight)
        model = (num_classes,
                 icnet_architecture.ICNetArchitecture(
                     filter_scale=filter_scale,
                     pretrain_single_branch_mode=pretrain_single_branch_mode,
                     **common_kwargs))
    else:
        if use_aux_loss:
            # TODO: remove hardcoded values here
            common_kwargs['main_loss_weight'] = 1.0
            common_kwargs['aux_loss_weight'] = 0.4
        model = (num_classes,
                 pspnet_architecture.PSPNetArchitecture(**common_kwargs))
    return model
Example #17
0
def _build_ssd_model(ssd_config, is_training, add_summaries):
    """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(
        feature_extractor_config=ssd_config.feature_extractor,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        is_training=is_training)

    # Build each component of the meta-architecture from its dedicated
    # sub-builder.
    box_coder = box_coder_builder.build(ssd_config.box_coder)
    matcher = matcher_builder.build(ssd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    encode_background_as_zeros = ssd_config.encode_background_as_zeros
    negative_class_weight = ssd_config.negative_class_weight
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    # Keras-based feature extractors need the Keras box-predictor builder;
    # slim-based ones go through the legacy build path below.
    if feature_extractor.is_keras_model:
        ssd_box_predictor = box_predictor_builder.build_keras(
            conv_hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
            freeze_batchnorm=ssd_config.freeze_batchnorm,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=anchor_generator.
            num_anchors_per_location(),
            box_predictor_config=ssd_config.box_predictor,
            is_training=is_training,
            num_classes=num_classes,
            add_background_class=ssd_config.add_background_class)
    else:
        ssd_box_predictor = box_predictor_builder.build(
            hyperparams_builder.build, ssd_config.box_predictor, is_training,
            num_classes, ssd_config.add_background_class)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    # losses_builder.build returns a 7-tuple here: losses, their weights,
    # the optional miner, an example sampler and an expected-loss-weights fn.
    (classification_loss, localization_loss, classification_weight,
     localization_weight, hard_example_miner, random_example_sampler,
     expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
    normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

    equalization_loss_config = ops.EqualizationLossConfig(
        weight=ssd_config.loss.equalization_loss.weight,
        exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)

    target_assigner_instance = target_assigner.TargetAssigner(
        region_similarity_calculator,
        matcher,
        box_coder,
        negative_class_weight=negative_class_weight)

    ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
    # Placeholder for extra meta-arch-specific kwargs; currently empty.
    kwargs = {}

    return ssd_meta_arch_fn(
        is_training=is_training,
        anchor_generator=anchor_generator,
        box_predictor=ssd_box_predictor,
        box_coder=box_coder,
        feature_extractor=feature_extractor,
        encode_background_as_zeros=encode_background_as_zeros,
        image_resizer_fn=image_resizer_fn,
        non_max_suppression_fn=non_max_suppression_fn,
        score_conversion_fn=score_conversion_fn,
        classification_loss=classification_loss,
        localization_loss=localization_loss,
        classification_loss_weight=classification_weight,
        localization_loss_weight=localization_weight,
        normalize_loss_by_num_matches=normalize_loss_by_num_matches,
        hard_example_miner=hard_example_miner,
        target_assigner_instance=target_assigner_instance,
        add_summaries=add_summaries,
        normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
        add_background_class=ssd_config.add_background_class,
        explicit_background_class=ssd_config.explicit_background_class,
        random_example_sampler=random_example_sampler,
        expected_loss_weights_fn=expected_loss_weights_fn,
        use_confidences_as_targets=ssd_config.use_confidences_as_targets,
        implicit_example_weight=ssd_config.implicit_example_weight,
        equalization_loss_config=equalization_loss_config,
        **kwargs)