 def test_raise_value_error_on_empty_anchor_generator(self):
   anchor_generator_text_proto = """
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   with self.assertRaises(ValueError):
     anchor_generator_builder.build(anchor_generator_proto)
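
For context, a minimal sketch of the dispatch pattern this test exercises: the builder checks which oneof case of the AnchorGenerator proto is populated and raises ValueError when none is. The oneof name and error text below are assumptions for illustration, not the library's verified internals.

def build_sketch(anchor_generator_config):
  # Hypothetical stand-in for anchor_generator_builder.build: dispatch on the
  # populated oneof case; an empty message has no case set, hence ValueError.
  kind = anchor_generator_config.WhichOneof('anchor_generator_oneof')  # assumed oneof name
  if kind is None:
    raise ValueError('Empty anchor generator.')
  return kind  # the real builder constructs and returns the generator object here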
Example #3
    def test_build_ssd_anchor_generator_without_reduced_boxes(self):
        anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
        reduce_boxes_in_lowest_layer: false
      }
    """
        anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
        text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
        anchor_generator_object = anchor_generator_builder.build(
            anchor_generator_proto)
        self.assertIsInstance(
            anchor_generator_object,
            multiple_grid_anchor_generator.MultipleGridAnchorGenerator)

        for actual_scales, expected_scales in zip(
                list(anchor_generator_object._scales), [(0.2, 0.264),
                                                        (0.35, 0.418),
                                                        (0.499, 0.570),
                                                        (0.649, 0.721),
                                                        (0.799, 0.871),
                                                        (0.949, 0.974)]):
            self.assert_almost_list_equal(expected_scales,
                                          actual_scales,
                                          delta=1e-2)

        for actual_aspect_ratio, expected_aspect_ratio in zip(
                list(anchor_generator_object._aspect_ratios),
                6 * [(1.0, 1.0)]):
            self.assert_almost_list_equal(expected_aspect_ratio,
                                          actual_aspect_ratio)

        self.assertAllClose(anchor_generator_object._base_anchor_size,
                            [1.0, 1.0])
  def test_build_ssd_anchor_generator_without_reduced_boxes(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
        reduce_boxes_in_lowest_layer: false
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))

    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.2, 0.264),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)

    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        6 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)

    self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
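
A side note on where the expected scales above come from: they are consistent with assumed ssd_anchor_generator defaults of min_scale=0.2 and max_scale=0.95 over 6 layers, with each layer pairing its own scale with the geometric mean of that scale and the next. The snippet below only reproduces that arithmetic; it is not the library code.

import math

min_scale, max_scale, num_layers = 0.2, 0.95, 6  # assumed defaults
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
          for i in range(num_layers)] + [1.0]
# Pair each layer's scale with the geometric mean of itself and the next scale.
per_layer = [(scales[i], math.sqrt(scales[i] * scales[i + 1]))
             for i in range(num_layers)]
print(per_layer)  # approx. [(0.2, 0.265), (0.35, 0.418), ..., (0.95, 0.975)]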
 def test_build_grid_anchor_generator_with_non_default_parameters(self):
   anchor_generator_text_proto = """
     grid_anchor_generator {
       height: 128
       width: 512
       height_stride: 10
       width_stride: 20
       height_offset: 30
       width_offset: 40
       scales: [0.4, 2.2]
       aspect_ratios: [0.3, 4.5]
     }
    """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              grid_anchor_generator.GridAnchorGenerator))
   self.assert_almost_list_equal(anchor_generator_object._scales,
                                 [0.4, 2.2])
   self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
                                 [0.3, 4.5])
   with self.test_session() as sess:
     base_anchor_size, anchor_offset, anchor_stride = sess.run(
         [anchor_generator_object._base_anchor_size,
          anchor_generator_object._anchor_offset,
          anchor_generator_object._anchor_stride])
   self.assertAllEqual(anchor_offset, [30, 40])
   self.assertAllEqual(anchor_stride, [10, 20])
   self.assertAllEqual(base_anchor_size, [128, 512])
  def test_build_ssd_anchor_generator_with_defaults(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))
    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.2, 0.2),
         (0.35, 0.418),
         (0.499, 0.570),
         (0.649, 0.721),
         (0.799, 0.871),
         (0.949, 0.974)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)

    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])
    def test_build_ssd_anchor_generator_with_non_default_parameters(self):
        anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
      }
    """
        anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
        text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
        anchor_generator_object = anchor_generator_builder.build(
            anchor_generator_proto)
        self.assertTrue(
            isinstance(
                anchor_generator_object,
                multiple_grid_anchor_generator.MultipleGridAnchorGenerator))

        for actual_scales, expected_scales in zip(
                list(anchor_generator_object._scales), [(0.1, 0.3, 0.3),
                                                        (0.8, )]):
            self.assert_almost_list_equal(expected_scales,
                                          actual_scales,
                                          delta=1e-2)

        for actual_aspect_ratio, expected_aspect_ratio in zip(
                list(anchor_generator_object._aspect_ratios), [(1.0, 2.0, 0.5),
                                                               (2.0, )]):
            self.assert_almost_list_equal(expected_aspect_ratio,
                                          actual_aspect_ratio)

        with self.test_session() as sess:
            base_anchor_size = sess.run(
                anchor_generator_object._base_anchor_size)
        self.assertAllClose(base_anchor_size, [1.0, 1.0])
 def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
     anchor_generator_text_proto = """
   multiscale_anchor_generator {
     aspect_ratios: [1.0]
   }
 """
     anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
     text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
     anchor_generator_object = anchor_generator_builder.build(
         anchor_generator_proto)
     self.assertTrue(
         isinstance(
             anchor_generator_object, multiscale_grid_anchor_generator.
             MultiscaleGridAnchorGenerator))
     for level, anchor_grid_info in zip(
             range(3, 8), anchor_generator_object._anchor_grid_info):
         self.assertEqual(set(anchor_grid_info.keys()),
                          set(['level', 'info']))
         self.assertTrue(level, anchor_grid_info['level'])
         self.assertEqual(len(anchor_grid_info['info']), 4)
         self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
         self.assertTrue(anchor_grid_info['info'][1], 1.0)
         self.assertAllClose(anchor_grid_info['info'][2],
                             [4.0 * 2**level, 4.0 * 2**level])
         self.assertAllClose(anchor_grid_info['info'][3],
                             [2**level, 2**level])
 def test_build_grid_anchor_generator_with_non_default_parameters(self):
     anchor_generator_text_proto = """
   grid_anchor_generator {
     height: 128
     width: 512
     height_stride: 10
     width_stride: 20
     height_offset: 30
     width_offset: 40
     scales: [0.4, 2.2]
     aspect_ratios: [0.3, 4.5]
   }
  """
     anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
     text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
     anchor_generator_object = anchor_generator_builder.build(
         anchor_generator_proto)
     self.assertTrue(
         isinstance(anchor_generator_object,
                    grid_anchor_generator.GridAnchorGenerator))
     self.assert_almost_list_equal(anchor_generator_object._scales,
                                   [0.4, 2.2])
     self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
                                   [0.3, 4.5])
     with self.test_session() as sess:
         base_anchor_size, anchor_offset, anchor_stride = sess.run([
             anchor_generator_object._base_anchor_size,
             anchor_generator_object._anchor_offset,
             anchor_generator_object._anchor_stride
         ])
     self.assertAllEqual(anchor_offset, [30, 40])
     self.assertAllEqual(anchor_stride, [10, 20])
     self.assertAllEqual(base_anchor_size, [128, 512])
    def test_build_ssd_anchor_generator_with_defaults(self):
        anchor_generator_text_proto = """
      ssd_anchor_generator {
        aspect_ratios: [1.0]
      }
    """
        anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
        text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
        anchor_generator_object = anchor_generator_builder.build(
            anchor_generator_proto)
        self.assertTrue(
            isinstance(
                anchor_generator_object,
                multiple_grid_anchor_generator.MultipleGridAnchorGenerator))
        for actual_scales, expected_scales in zip(
                list(anchor_generator_object._scales), [(0.1, 0.2, 0.2),
                                                        (0.35, 0.418),
                                                        (0.499, 0.570),
                                                        (0.649, 0.721),
                                                        (0.799, 0.871),
                                                        (0.949, 0.974)]):
            self.assert_almost_list_equal(expected_scales,
                                          actual_scales,
                                          delta=1e-2)
        for actual_aspect_ratio, expected_aspect_ratio in zip(
                list(anchor_generator_object._aspect_ratios),
            [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
            self.assert_almost_list_equal(expected_aspect_ratio,
                                          actual_aspect_ratio)

        with self.test_session() as sess:
            base_anchor_size = sess.run(
                anchor_generator_object._base_anchor_size)
        self.assertAllClose(base_anchor_size, [1.0, 1.0])
def _build_ssd_model(ssd_config, is_training):
    """Builds an SSD detection model based on the model config.

    Args:
      ssd_config: A ssd.proto object containing the config for the desired
        SSDMetaArch.
      is_training: True if this model is being built for training purposes.

    Returns:
      SSDMetaArch based on the config.
    Raises:
      ValueError: If ssd_config.type is not recognized (i.e. not registered in
        model_class_map).
    """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(ssd_config.feature_extractor,
                                                     is_training)

    box_coder = box_coder_builder.build(ssd_config.box_coder)
    # matcher provides a "match" method that returns a "Match" object.
    matcher = matcher_builder.build(ssd_config.matcher)
    # region_similarity_calculator.compare returns a tensor of shape [N, M] with pairwise similarity (e.g. IOU or IOA) scores.
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    # ssd_box_predictor.predict: returns a prediction dictionary
    ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                    ssd_config.box_predictor,
                                                    is_training, num_classes)

    # anchor_generator is a MultipleGridAnchorGenerator; its anchors are always in normalized coordinates.
    # Usage: anchor_generator.generate produces the collection of bounding boxes used as anchors.
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight,
     hard_example_miner) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

    return ssd_meta_arch.SSDMetaArch(
        is_training,
        anchor_generator,
        ssd_box_predictor,
        box_coder,
        feature_extractor,
        matcher,
        region_similarity_calculator,
        image_resizer_fn,
        non_max_suppression_fn,
        score_conversion_fn,
        classification_loss,
        localization_loss,
        classification_weight,
        localization_weight,
        normalize_loss_by_num_matches,
        hard_example_miner)
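
For orientation, a hedged usage sketch of how a builder like the one above is typically reached: the ssd sub-message of a parsed DetectionModel config is handed to it. The proto module path below is an assumption based on the TF Object Detection API layout, and the minimal config shown is far too sparse to actually construct a model.

from google.protobuf import text_format
from object_detection.protos import model_pb2  # assumed proto module path

model_text_proto = """
  ssd {
    num_classes: 3
    # a real config also needs feature_extractor, box_coder, matcher,
    # anchor_generator, box_predictor, post_processing and loss sections
  }
"""
model_config = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_config)
# With a complete config, the model would be built from the ssd sub-message:
# detection_model = _build_ssd_model(model_config.ssd, is_training=True)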
  def test_build_ssd_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))

    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.3, 0.3), (0.8,)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)

    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5), (2.0,)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)

    with self.test_session() as sess:
      base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
    self.assertAllClose(base_anchor_size, [1.0, 1.0])
Example #13
def _build_ssd_model(ssd_config, is_training, add_summaries):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    SSDMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(ssd_config.feature_extractor,
                                                   is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight,
   hard_example_miner) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

  return ssd_meta_arch.SSDMetaArch(
      is_training,
      anchor_generator,
      ssd_box_predictor,
      box_coder,
      feature_extractor,
      matcher,
      region_similarity_calculator,
      encode_background_as_zeros,
      image_resizer_fn,
      non_max_suppression_fn,
      score_conversion_fn,
      classification_loss,
      localization_loss,
      classification_weight,
      localization_weight,
      normalize_loss_by_num_matches,
      hard_example_miner,
      add_summaries=add_summaries)
Example #14
def _build_ssd_model(ssd_config, is_training, add_summaries):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    SSDMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(ssd_config.feature_extractor,
                                                   is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight,
   hard_example_miner) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

  return ssd_meta_arch.SSDMetaArch(
      is_training,
      anchor_generator,
      ssd_box_predictor,
      box_coder,
      feature_extractor,
      matcher,
      region_similarity_calculator,
      image_resizer_fn,
      non_max_suppression_fn,
      score_conversion_fn,
      classification_loss,
      localization_loss,
      classification_weight,
      localization_weight,
      normalize_loss_by_num_matches,
      hard_example_miner,
      add_summaries=add_summaries)
def _build_sssfd_model(sssfd_config,
                       is_training,
                       add_summaries,
                       add_background_class=True):
    num_classes = sssfd_config.num_classes

    # Feature extractor
    feature_extractor = _build_sssfd_feature_extractor(
        feature_extractor_config=sssfd_config.feature_extractor,
        is_training=is_training)

    box_coder = box_coder_builder.build(sssfd_config.box_coder)
    matcher = matcher_builder.build(sssfd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        sssfd_config.similarity_calculator)
    encode_background_as_zeros = sssfd_config.encode_background_as_zeros
    negative_class_weight = sssfd_config.negative_class_weight
    sssfd_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build, sssfd_config.box_predictor, is_training,
        num_classes)
    anchor_generator = anchor_generator_builder.build(
        sssfd_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(sssfd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        sssfd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight, hard_example_miner,
     random_example_sampler) = losses_builder.build(sssfd_config.loss)
    normalize_loss_by_num_matches = sssfd_config.normalize_loss_by_num_matches
    normalize_loc_loss_by_codesize = sssfd_config.normalize_loc_loss_by_codesize

    return ssd_meta_arch.SSDMetaArch(
        is_training,
        anchor_generator,
        sssfd_box_predictor,
        box_coder,
        feature_extractor,
        matcher,
        region_similarity_calculator,
        encode_background_as_zeros,
        negative_class_weight,
        image_resizer_fn,
        non_max_suppression_fn,
        score_conversion_fn,
        classification_loss,
        localization_loss,
        classification_weight,
        localization_weight,
        normalize_loss_by_num_matches,
        hard_example_miner,
        add_summaries=add_summaries,
        normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
        freeze_batchnorm=sssfd_config.freeze_batchnorm,
        inplace_batchnorm_update=sssfd_config.inplace_batchnorm_update,
        add_background_class=add_background_class,
        random_example_sampler=random_example_sampler)
Example #16
    def test_build_flexible_anchor_generator(self):
        anchor_generator_text_proto = """
      flexible_grid_anchor_generator {
        anchor_grid {
          base_sizes: [1.5]
          aspect_ratios: [1.0]
          height_stride: 16
          width_stride: 20
          height_offset: 8
          width_offset: 9
        }
        anchor_grid {
          base_sizes: [1.0, 2.0]
          aspect_ratios: [1.0, 0.5]
          height_stride: 32
          width_stride: 30
          height_offset: 10
          width_offset: 11
        }
      }
    """
        anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
        text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
        anchor_generator_object = anchor_generator_builder.build(
            anchor_generator_proto)
        self.assertIsInstance(
            anchor_generator_object,
            flexible_grid_anchor_generator.FlexibleGridAnchorGenerator)

        for actual_base_sizes, expected_base_sizes in zip(
                list(anchor_generator_object._base_sizes), [(1.5, ),
                                                            (1.0, 2.0)]):
            self.assert_almost_list_equal(expected_base_sizes,
                                          actual_base_sizes)

        for actual_aspect_ratios, expected_aspect_ratios in zip(
                list(anchor_generator_object._aspect_ratios), [(1.0, ),
                                                               (1.0, 0.5)]):
            self.assert_almost_list_equal(expected_aspect_ratios,
                                          actual_aspect_ratios)

        for actual_strides, expected_strides in zip(
                list(anchor_generator_object._anchor_strides), [(16, 20),
                                                                (32, 30)]):
            self.assert_almost_list_equal(expected_strides, actual_strides)

        for actual_offsets, expected_offsets in zip(
                list(anchor_generator_object._anchor_offsets), [(8, 9),
                                                                (10, 11)]):
            self.assert_almost_list_equal(expected_offsets, actual_offsets)

        self.assertTrue(anchor_generator_object._normalize_coordinates)
    def test_build_ssd_anchor_generator_with_non_default_parameters(self):
        anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
        height_stride: 16
        height_stride: 32
        width_stride: 20
        width_stride: 30
        height_offset: 8
        height_offset: 16
        width_offset: 0
        width_offset: 10
      }
    """
        anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
        text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
        anchor_generator_object = anchor_generator_builder.build(
            anchor_generator_proto)
        self.assertTrue(
            isinstance(
                anchor_generator_object,
                multiple_grid_anchor_generator.MultipleGridAnchorGenerator))

        for actual_scales, expected_scales in zip(
                list(anchor_generator_object._scales), [(0.1, 0.3, 0.3),
                                                        (0.8, 0.894)]):
            self.assert_almost_list_equal(expected_scales,
                                          actual_scales,
                                          delta=1e-2)

        for actual_aspect_ratio, expected_aspect_ratio in zip(
                list(anchor_generator_object._aspect_ratios), [(1.0, 2.0, 0.5),
                                                               (2.0, 1.0)]):
            self.assert_almost_list_equal(expected_aspect_ratio,
                                          actual_aspect_ratio)

        for actual_strides, expected_strides in zip(
                list(anchor_generator_object._anchor_strides), [(16, 20),
                                                                (32, 30)]):
            self.assert_almost_list_equal(expected_strides, actual_strides)

        for actual_offsets, expected_offsets in zip(
                list(anchor_generator_object._anchor_offsets), [(8, 0),
                                                                (16, 10)]):
            self.assert_almost_list_equal(expected_offsets, actual_offsets)

        self.assertAllClose(anchor_generator_object._base_anchor_size,
                            [1.0, 1.0])
Example #18
def _build_faster_rcnn_model(frcnn_config, is_training):  # args: the Faster R-CNN config and whether the model is being built for training
  """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    FasterRCNNMetaArch based on the config.
  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).

  """
  # The config proto carries all of the model parameters.

  num_classes = frcnn_config.num_classes  # number of object classes
  image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)  # returns a callable for image resizing

  feature_extractor = _build_faster_rcnn_feature_extractor(  # create the feature extractor,
      frcnn_config.feature_extractor, is_training)  # e.g. part of a ResNet used as the backbone

  first_stage_only = frcnn_config.first_stage_only  # if false, the complete Faster R-CNN (not just the RPN stage) is built
  first_stage_anchor_generator = anchor_generator_builder.build(  # prepare the RPN anchor generator
      frcnn_config.first_stage_anchor_generator)  # built from the first_stage_anchor_generator sub-config

  # The three builders above return the objects/callables used for the corresponding tasks.

  first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate  # defaults apply if not set in the config

  first_stage_box_predictor_arg_scope = hyperparams_builder.build(  # hyperparameters for the first-stage RPN box predictor
      frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)

  first_stage_box_predictor_kernel_size = (  # kernel size of the conv applied to the RPN feature map
      frcnn_config.first_stage_box_predictor_kernel_size)

  first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth  # output depth of the conv just prior to RPN box predictions
  first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size  # RPN minibatch size

  first_stage_positive_balance_fraction = (  # fraction of positive examples in the RPN minibatch
      frcnn_config.first_stage_positive_balance_fraction)
 def test_build_grid_anchor_generator_with_defaults(self):
   anchor_generator_text_proto = """
     grid_anchor_generator {
     }
    """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              grid_anchor_generator.GridAnchorGenerator))
   self.assertListEqual(anchor_generator_object._scales, [])
   self.assertListEqual(anchor_generator_object._aspect_ratios, [])
   self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0])
   self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16])
   self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256])
 def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
     self):
   anchor_generator_text_proto = """
     multiscale_anchor_generator {
       aspect_ratios: [1.0]
       normalize_coordinates: false
     }
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              multiscale_grid_anchor_generator.
                              MultiscaleGridAnchorGenerator))
   self.assertFalse(anchor_generator_object._normalize_coordinates)
Example #21
 def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
         self):
     anchor_generator_text_proto = """
   multiscale_anchor_generator {
     aspect_ratios: [1.0]
     normalize_coordinates: false
   }
 """
     anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
     text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
     anchor_generator_object = anchor_generator_builder.build(
         anchor_generator_proto)
     self.assertIsInstance(
         anchor_generator_object,
         multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator)
     self.assertFalse(anchor_generator_object._normalize_coordinates)
 def test_build_grid_anchor_generator_with_defaults(self):
   anchor_generator_text_proto = """
     grid_anchor_generator {
     }
    """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertIsInstance(anchor_generator_object,
                         grid_anchor_generator.GridAnchorGenerator)
   self.assertListEqual(anchor_generator_object._scales, [])
   self.assertListEqual(anchor_generator_object._aspect_ratios, [])
   self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0])
   self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16])
   self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256])
  def test_build_ssd_anchor_generator_with_non_default_parameters(self):
    anchor_generator_text_proto = """
      ssd_anchor_generator {
        num_layers: 2
        min_scale: 0.3
        max_scale: 0.8
        aspect_ratios: [2.0]
        height_stride: 16
        height_stride: 32
        width_stride: 20
        width_stride: 30
        height_offset: 8
        height_offset: 16
        width_offset: 0
        width_offset: 10
      }
    """
    anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
    text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
    anchor_generator_object = anchor_generator_builder.build(
        anchor_generator_proto)
    self.assertTrue(isinstance(anchor_generator_object,
                               multiple_grid_anchor_generator.
                               MultipleGridAnchorGenerator))

    for actual_scales, expected_scales in zip(
        list(anchor_generator_object._scales),
        [(0.1, 0.3, 0.3), (0.8, 0.894)]):
      self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)

    for actual_aspect_ratio, expected_aspect_ratio in zip(
        list(anchor_generator_object._aspect_ratios),
        [(1.0, 2.0, 0.5), (2.0, 1.0)]):
      self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)

    for actual_strides, expected_strides in zip(
        list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
      self.assert_almost_list_equal(expected_strides, actual_strides)

    for actual_offsets, expected_offsets in zip(
        list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
      self.assert_almost_list_equal(expected_offsets, actual_offsets)

    self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
 def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
   anchor_generator_text_proto = """
     ssd_anchor_generator {
       aspect_ratios: [0.5]
       interpolated_scale_aspect_ratio: 0.5
       reduce_boxes_in_lowest_layer: false
     }
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertIsInstance(anchor_generator_object,
                         multiple_grid_anchor_generator.
                         MultipleGridAnchorGenerator)
   for actual_aspect_ratio, expected_aspect_ratio in zip(
       list(anchor_generator_object._aspect_ratios),
       6 * [(0.5, 0.5)]):
     self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
def _build_yolo_model(yolo_config, is_training):
    """Builds an YOLO detection model based on the model config.

  Args:
    yolo_config: A yolo.proto object containing the config for the desired
      YOLOMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    YOLOMetaArch based on the config.
  Raises:
    ValueError: If yolo_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = yolo_config.num_classes

    # Feature extractor
    feature_extractor = _build_yolo_feature_extractor(
        yolo_config.feature_extractor, is_training)

    box_coder = box_coder_builder.build(yolo_config.box_coder)
    matcher = matcher_builder.build(yolo_config.matcher)
    region_similarity_calculator = sim_calc.build(
        yolo_config.similarity_calculator)
    yolo_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                     yolo_config.box_predictor,
                                                     is_training, num_classes)
    anchor_generator = anchor_generator_builder.build(
        yolo_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(yolo_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        yolo_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight,
     hard_example_miner) = losses_builder.build(yolo_config.loss)
    normalize_loss_by_num_matches = yolo_config.normalize_loss_by_num_matches

    return yolo_meta_arch.YOLOMetaArch(
        is_training, anchor_generator, yolo_box_predictor, box_coder,
        feature_extractor, matcher, region_similarity_calculator,
        image_resizer_fn, non_max_suppression_fn, score_conversion_fn,
        classification_loss, localization_loss, classification_weight,
        localization_weight, normalize_loss_by_num_matches, hard_example_miner)
 def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
   anchor_generator_text_proto = """
     ssd_anchor_generator {
       aspect_ratios: [0.5]
       interpolated_scale_aspect_ratio: 0.5
       reduce_boxes_in_lowest_layer: false
     }
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              multiple_grid_anchor_generator.
                              MultipleGridAnchorGenerator))
   for actual_aspect_ratio, expected_aspect_ratio in zip(
       list(anchor_generator_object._aspect_ratios),
       6 * [(0.5, 0.5)]):
     self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
Example #27
def _build_east_model(east_config, is_training):
    """Builds an EAST detection model based on the model config.

  Args:
    east_config: An east.proto object containing the config for the desired
      EASTMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    EASTMetaArch based on the config.
  Raises:
    ValueError: If east_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = east_config.num_classes

    # Feature extractor
    feature_extractor = _build_east_feature_extractor(
        east_config.feature_extractor, is_training)

    box_coder = box_coder_builder.build(east_config.box_coder)
    box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                east_config.box_predictor,
                                                is_training, num_classes)
    anchor_generator = anchor_generator_builder.build(
        east_config.anchor_generator)
    #image_resizer_fn = image_resizer_builder.build(east_config.image_resizer)
    image_resizer_fn = None
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        east_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight,
     hard_example_miner) = losses_builder.build(east_config.loss)
    normalize_loss_by_num_matches = east_config.normalize_loss_by_num_matches

    return east_meta_arch.EASTMetaArch(
        is_training, anchor_generator, box_predictor, box_coder,
        feature_extractor, image_resizer_fn, non_max_suppression_fn,
        score_conversion_fn, classification_loss, localization_loss,
        classification_weight, localization_weight,
        normalize_loss_by_num_matches)
Example #28
 def test_build_ssd_anchor_generator_with_custom_scales(self):
     anchor_generator_text_proto = """
   ssd_anchor_generator {
     aspect_ratios: [1.0]
     scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
     reduce_boxes_in_lowest_layer: false
   }
 """
     anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
     text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
     anchor_generator_object = anchor_generator_builder.build(
         anchor_generator_proto)
     self.assertIsInstance(
         anchor_generator_object,
         multiple_grid_anchor_generator.MultipleGridAnchorGenerator)
     for actual_scales, expected_scales in zip(
             list(anchor_generator_object._scales),
         [(0.1, math.sqrt(0.1 * 0.15)), (0.15, math.sqrt(0.15 * 0.2)),
          (0.2, math.sqrt(0.2 * 0.4)), (0.4, math.sqrt(0.4 * 0.6)),
          (0.6, math.sqrt(0.6 * 0.8)), (0.8, math.sqrt(0.8 * 1.0))]):
         self.assert_almost_list_equal(expected_scales,
                                       actual_scales,
                                       delta=1e-2)
 def test_build_ssd_anchor_generator_with_custom_scales(self):
   anchor_generator_text_proto = """
     ssd_anchor_generator {
       aspect_ratios: [1.0]
       scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
       reduce_boxes_in_lowest_layer: false
     }
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              multiple_grid_anchor_generator.
                              MultipleGridAnchorGenerator))
   for actual_scales, expected_scales in zip(
       list(anchor_generator_object._scales),
       [(0.1, math.sqrt(0.1 * 0.15)),
        (0.15, math.sqrt(0.15 * 0.2)),
        (0.2, math.sqrt(0.2 * 0.4)),
        (0.4, math.sqrt(0.4 * 0.6)),
        (0.6, math.sqrt(0.6 * 0.8)),
        (0.8, math.sqrt(0.8 * 1.0))]):
     self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
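
As a quick cross-check of the expected tuples above: with explicit scales, each layer pairs its scale with the geometric mean of that scale and the following one, using 1.0 after the last entry. This snippet only reproduces the arithmetic from the test.

import math

custom_scales = [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
pairs = [(s, math.sqrt(s * nxt))
         for s, nxt in zip(custom_scales, custom_scales[1:] + [1.0])]
print(pairs)  # matches the expected_scales tuples asserted above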
 def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
   anchor_generator_text_proto = """
     multiscale_anchor_generator {
       aspect_ratios: [1.0]
     }
   """
   anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
   text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
   anchor_generator_object = anchor_generator_builder.build(
       anchor_generator_proto)
   self.assertTrue(isinstance(anchor_generator_object,
                              multiscale_grid_anchor_generator.
                              MultiscaleGridAnchorGenerator))
   for level, anchor_grid_info in zip(
       range(3, 8), anchor_generator_object._anchor_grid_info):
     self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
     self.assertTrue(level, anchor_grid_info['level'])
     self.assertEqual(len(anchor_grid_info['info']), 4)
     self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
     self.assertTrue(anchor_grid_info['info'][1], 1.0)
     self.assertAllClose(anchor_grid_info['info'][2],
                         [4.0 * 2**level, 4.0 * 2**level])
     self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])

   self.assertTrue(anchor_generator_object._normalize_coordinates)
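
The per-level expectations above line up with what the multiscale generator's usual defaults would produce (min_level=3, max_level=7, anchor_scale=4.0, scales_per_octave=2); treat those values as assumptions here. This snippet recomputes info[0] (octave scales), info[2] (base anchor size) and info[3] (stride) for each level.

anchor_scale, scales_per_octave = 4.0, 2  # assumed multiscale_anchor_generator defaults
for level in range(3, 8):
  octave_scales = [2**(i / float(scales_per_octave)) for i in range(scales_per_octave)]
  base_anchor_size = [anchor_scale * 2**level] * 2  # corresponds to info[2]
  anchor_stride = [2**level] * 2                    # corresponds to info[3]
  print(level, octave_scales, base_anchor_size, anchor_stride)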
def _build_ssd_model(ssd_config,
                     is_training,
                     add_summaries,
                     add_background_class=True):
    """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.
    add_background_class: Whether to add an implicit background class to one-hot
      encodings of groundtruth labels. Set to false if using groundtruth labels
      with an explicit background class or using multiclass scores instead of
      truth in the case of distillation.
  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(
        feature_extractor_config=ssd_config.feature_extractor,
        is_training=is_training)

    box_coder = box_coder_builder.build(ssd_config.box_coder)
    matcher = matcher_builder.build(ssd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    encode_background_as_zeros = ssd_config.encode_background_as_zeros
    negative_class_weight = ssd_config.negative_class_weight
    ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                    ssd_config.box_predictor,
                                                    is_training, num_classes)
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight, hard_example_miner,
     random_example_sampler) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
    normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

    return ssd_meta_arch.SSDMetaArch(
        is_training,
        anchor_generator,
        ssd_box_predictor,
        box_coder,
        feature_extractor,
        matcher,
        region_similarity_calculator,
        encode_background_as_zeros,
        negative_class_weight,
        image_resizer_fn,
        non_max_suppression_fn,
        score_conversion_fn,
        classification_loss,
        localization_loss,
        classification_weight,
        localization_weight,
        normalize_loss_by_num_matches,
        hard_example_miner,
        add_summaries=add_summaries,
        normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
        add_background_class=add_background_class,
        random_example_sampler=random_example_sampler)
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
    """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = frcnn_config.num_classes
    image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

    feature_extractor = _build_faster_rcnn_feature_extractor(
        frcnn_config.feature_extractor, is_training,
        frcnn_config.inplace_batchnorm_update)

    number_of_stages = frcnn_config.number_of_stages
    first_stage_anchor_generator = anchor_generator_builder.build(
        frcnn_config.first_stage_anchor_generator)

    first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
    first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
        frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
    first_stage_box_predictor_kernel_size = (
        frcnn_config.first_stage_box_predictor_kernel_size)
    first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
    first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
    first_stage_positive_balance_fraction = (
        frcnn_config.first_stage_positive_balance_fraction)
    first_stage_nms_score_threshold = frcnn_config.first_stage_nms_score_threshold
    first_stage_nms_iou_threshold = frcnn_config.first_stage_nms_iou_threshold
    first_stage_max_proposals = frcnn_config.first_stage_max_proposals
    first_stage_loc_loss_weight = (
        frcnn_config.first_stage_localization_loss_weight)
    first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

    initial_crop_size = frcnn_config.initial_crop_size
    maxpool_kernel_size = frcnn_config.maxpool_kernel_size
    maxpool_stride = frcnn_config.maxpool_stride

    second_stage_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build,
        frcnn_config.second_stage_box_predictor,
        is_training=is_training,
        num_classes=num_classes)
    second_stage_batch_size = frcnn_config.second_stage_batch_size
    second_stage_balance_fraction = frcnn_config.second_stage_balance_fraction
    (second_stage_non_max_suppression_fn,
     second_stage_score_conversion_fn) = post_processing_builder.build(
         frcnn_config.second_stage_post_processing)
    second_stage_localization_loss_weight = (
        frcnn_config.second_stage_localization_loss_weight)
    second_stage_classification_loss = (
        losses_builder.build_faster_rcnn_classification_loss(
            frcnn_config.second_stage_classification_loss))
    second_stage_classification_loss_weight = (
        frcnn_config.second_stage_classification_loss_weight)
    second_stage_mask_prediction_loss_weight = (
        frcnn_config.second_stage_mask_prediction_loss_weight)

    hard_example_miner = None
    if frcnn_config.HasField('hard_example_miner'):
        hard_example_miner = losses_builder.build_hard_example_miner(
            frcnn_config.hard_example_miner,
            second_stage_classification_loss_weight,
            second_stage_localization_loss_weight)

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'image_resizer_fn': image_resizer_fn,
        'feature_extractor': feature_extractor,
        'number_of_stages': number_of_stages,
        'first_stage_anchor_generator': first_stage_anchor_generator,
        'first_stage_atrous_rate': first_stage_atrous_rate,
        'first_stage_box_predictor_arg_scope_fn':
        first_stage_box_predictor_arg_scope_fn,
        'first_stage_box_predictor_kernel_size':
        first_stage_box_predictor_kernel_size,
        'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
        'first_stage_minibatch_size': first_stage_minibatch_size,
        'first_stage_positive_balance_fraction':
        first_stage_positive_balance_fraction,
        'first_stage_nms_score_threshold': first_stage_nms_score_threshold,
        'first_stage_nms_iou_threshold': first_stage_nms_iou_threshold,
        'first_stage_max_proposals': first_stage_max_proposals,
        'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
        'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
        'second_stage_batch_size': second_stage_batch_size,
        'second_stage_balance_fraction': second_stage_balance_fraction,
        'second_stage_non_max_suppression_fn':
        second_stage_non_max_suppression_fn,
        'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
        'second_stage_localization_loss_weight':
        second_stage_localization_loss_weight,
        'second_stage_classification_loss': second_stage_classification_loss,
        'second_stage_classification_loss_weight':
        second_stage_classification_loss_weight,
        'hard_example_miner': hard_example_miner,
        'add_summaries': add_summaries
    }

    if isinstance(second_stage_box_predictor, box_predictor.RfcnBoxPredictor):
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
    else:
        return faster_rcnn_meta_arch.FasterRCNNMetaArch(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
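
A companion note to the branch at the end of the function: which meta-architecture is returned depends only on the configured second_stage_box_predictor. The textproto fragments below are illustrative, not complete buildable configs, and the predictor field names are assumptions based on box_predictor.proto conventions.

# Illustrative only: an rfcn_box_predictor in the second stage selects
# RFCNMetaArch; any other predictor (e.g. mask_rcnn_box_predictor) selects
# FasterRCNNMetaArch.
rfcn_variant = """
  faster_rcnn {
    second_stage_box_predictor { rfcn_box_predictor { } }
  }
"""
faster_rcnn_variant = """
  faster_rcnn {
    second_stage_box_predictor { mask_rcnn_box_predictor { } }
  }
"""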
Example #33
def _build_faster_rcnn_model(frcnn_config,
                             is_training,
                             add_summaries,
                             meta_architecture='faster_rcnn'):
    """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = frcnn_config.num_classes
    image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

    feature_extractor = _build_faster_rcnn_feature_extractor(
        frcnn_config.feature_extractor, is_training,
        frcnn_config.inplace_batchnorm_update)

    number_of_stages = frcnn_config.number_of_stages
    first_stage_anchor_generator = anchor_generator_builder.build(
        frcnn_config.first_stage_anchor_generator)

    first_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'proposal',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
    first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
    first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
        frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
    first_stage_box_predictor_kernel_size = (
        frcnn_config.first_stage_box_predictor_kernel_size)
    first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
    first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
    # TODO(bhattad): When eval is supported using static shapes, add separate
    # use_static_shapes_for_training and use_static_shapes_for_evaluation.
    use_static_shapes = frcnn_config.use_static_shapes and is_training
    first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
        is_static=frcnn_config.use_static_balanced_label_sampler
        and is_training)
    first_stage_max_proposals = frcnn_config.first_stage_max_proposals
    first_stage_proposals_path = frcnn_config.first_stage_proposals_path
    if (frcnn_config.first_stage_nms_iou_threshold < 0
            or frcnn_config.first_stage_nms_iou_threshold > 1.0):
        raise ValueError('iou_threshold not in [0, 1.0].')
    if (is_training and
            frcnn_config.second_stage_batch_size > first_stage_max_proposals):
        raise ValueError('second_stage_batch_size should be no greater than '
                         'first_stage_max_proposals.')
    first_stage_non_max_suppression_fn = functools.partial(
        post_processing.batch_multiclass_non_max_suppression,
        score_thresh=frcnn_config.first_stage_nms_score_threshold,
        iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
        max_size_per_class=frcnn_config.first_stage_max_proposals,
        max_total_size=frcnn_config.first_stage_max_proposals,
        use_static_shapes=use_static_shapes and is_training)
    first_stage_loc_loss_weight = (
        frcnn_config.first_stage_localization_loss_weight)
    first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

    initial_crop_size = frcnn_config.initial_crop_size
    maxpool_kernel_size = frcnn_config.maxpool_kernel_size
    maxpool_stride = frcnn_config.maxpool_stride

    second_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'detection',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher,
        iou_threshold=frcnn_config.second_stage_target_iou_threshold)
    second_stage_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build,
        frcnn_config.second_stage_box_predictor,
        is_training=is_training,
        num_classes=num_classes)
    second_stage_batch_size = frcnn_config.second_stage_batch_size
    second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.second_stage_balance_fraction,
        is_static=frcnn_config.use_static_balanced_label_sampler
        and is_training)
    (second_stage_non_max_suppression_fn,
     second_stage_score_conversion_fn) = post_processing_builder.build(
         frcnn_config.second_stage_post_processing)
    second_stage_localization_loss_weight = (
        frcnn_config.second_stage_localization_loss_weight)
    second_stage_classification_loss = (
        losses_builder.build_faster_rcnn_classification_loss(
            frcnn_config.second_stage_classification_loss))
    second_stage_classification_loss_weight = (
        frcnn_config.second_stage_classification_loss_weight)
    second_stage_mask_prediction_loss_weight = (
        frcnn_config.second_stage_mask_prediction_loss_weight)

    hard_example_miner = None
    if frcnn_config.HasField('hard_example_miner'):
        hard_example_miner = losses_builder.build_hard_example_miner(
            frcnn_config.hard_example_miner,
            second_stage_classification_loss_weight,
            second_stage_localization_loss_weight)

    crop_and_resize_fn = (ops.matmul_crop_and_resize
                          if frcnn_config.use_matmul_crop_and_resize else
                          ops.native_crop_and_resize)
    clip_anchors_to_image = (frcnn_config.clip_anchors_to_image)

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'image_resizer_fn': image_resizer_fn,
        'feature_extractor': feature_extractor,
        'number_of_stages': number_of_stages,
        'first_stage_anchor_generator': first_stage_anchor_generator,
        'first_stage_target_assigner': first_stage_target_assigner,
        'first_stage_atrous_rate': first_stage_atrous_rate,
        'first_stage_box_predictor_arg_scope_fn':
        first_stage_box_predictor_arg_scope_fn,
        'first_stage_box_predictor_kernel_size':
        first_stage_box_predictor_kernel_size,
        'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
        'first_stage_minibatch_size': first_stage_minibatch_size,
        'first_stage_sampler': first_stage_sampler,
        'first_stage_non_max_suppression_fn':
        first_stage_non_max_suppression_fn,
        'first_stage_max_proposals': first_stage_max_proposals,
        'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
        'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
        'second_stage_target_assigner': second_stage_target_assigner,
        'second_stage_batch_size': second_stage_batch_size,
        'second_stage_sampler': second_stage_sampler,
        'second_stage_non_max_suppression_fn':
        second_stage_non_max_suppression_fn,
        'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
        'second_stage_localization_loss_weight':
        second_stage_localization_loss_weight,
        'second_stage_classification_loss': second_stage_classification_loss,
        'second_stage_classification_loss_weight':
        second_stage_classification_loss_weight,
        'hard_example_miner': hard_example_miner,
        'add_summaries': add_summaries,
        'crop_and_resize_fn': crop_and_resize_fn,
        'clip_anchors_to_image': clip_anchors_to_image,
        'use_static_shapes': use_static_shapes,
        'resize_masks': frcnn_config.resize_masks
    }

    if isinstance(second_stage_box_predictor,
                  rfcn_box_predictor.RfcnBoxPredictor):
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
    elif meta_architecture == 'faster_rcnn':
        return faster_rcnn_meta_arch.FasterRCNNMetaArch(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
    elif meta_architecture == 'faster_rcnn_override_RPN':
        return faster_rcnn_meta_arch_override_RPN.FasterRCNNMetaArchOverrideRPN(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            first_stage_proposals_path=first_stage_proposals_path,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
    elif meta_architecture == 'faster_rcnn_rpn_blend':
        common_kwargs['use_matmul_crop_and_resize'] = False
        common_kwargs[
            'first_stage_nms_iou_threshold'] = frcnn_config.first_stage_nms_iou_threshold
        common_kwargs[
            'first_stage_nms_score_threshold'] = frcnn_config.first_stage_nms_score_threshold
        common_kwargs.pop('crop_and_resize_fn')
        common_kwargs.pop('first_stage_non_max_suppression_fn')
        common_kwargs.pop('resize_masks')
        common_kwargs.pop('use_static_shapes')
        return faster_rcnn_meta_arch_rpn_blend.FasterRCNNMetaArchRPNBlend(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            first_stage_proposals_path=first_stage_proposals_path,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
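For context, a minimal usage sketch of the builder above. It assumes the standard pipeline_pb2.TrainEvalPipelineConfig wrapper and a placeholder config path; in practice this helper is normally reached through the public build() entry point rather than called directly.

from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

# Placeholder path; substitute a real Faster R-CNN pipeline config.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with open('samples/configs/faster_rcnn.config') as f:
    text_format.Merge(f.read(), pipeline_config)

# frcnn_config is the faster_rcnn sub-message of the model config.
model = _build_faster_rcnn_model(
    pipeline_config.model.faster_rcnn,
    is_training=True,
    add_summaries=True,
    meta_architecture='faster_rcnn')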
Exemple #34
0
def _build_ssd_model(ssd_config, is_training):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    SSDMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes  # Number of object classes.

  # Feature extractor (e.g. ssd_mobilenet_v1); the concrete feature extractor
  # class lives in its own module and is consumed by ssd_meta_arch.py.
  feature_extractor = _build_ssd_feature_extractor(ssd_config.feature_extractor,
                                                   is_training)

  # For the localization loss, the predictor outputs four coordinates that are
  # regressed against an encoding computed from the ground-truth boxes and the
  # default (anchor) boxes; at inference time the predictions are decoded back
  # to image coordinates.

  # Encodes boxes w.r.t. ground-truth and anchor boxes (see Eq. 2 in the SSD
  # paper); the encoded targets are what the predictions are regressed against.
  box_coder = box_coder_builder.build(ssd_config.box_coder)
  # Builds a matcher that matches default boxes to ground-truth boxes; this
  # matching is what produces the x_ij indicator in the SSD paper.
  matcher = matcher_builder.build(ssd_config.matcher)

  # Similarity measure used for matching (typically IOU).
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)

  # Returns a box_predictor object that owns the convolutional prediction heads.
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)

  # Generates anchor boxes for the different feature maps.
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)

  # Image resizer (e.g. fixed_shape_resizer).
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)

  # Post-processing: non-max suppression plus a score conversion function that
  # converts logits to probabilities.
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)

  # Loss objects, including the hard example miner.
  (classification_loss, localization_loss, classification_weight,
   localization_weight,
   hard_example_miner) = losses_builder.build(ssd_config.loss)

  # Whether to divide the loss by the number of matched anchor boxes.
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

  # Construct the SSDMetaArch object used for training.
  return ssd_meta_arch.SSDMetaArch(
      is_training,
      anchor_generator,
      ssd_box_predictor,
      box_coder,
      feature_extractor,
      matcher,
      region_similarity_calculator,
      image_resizer_fn,
      non_max_suppression_fn,
      score_conversion_fn,
      classification_loss,
      localization_loss,
      classification_weight,
      localization_weight,
      normalize_loss_by_num_matches,
      hard_example_miner)
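The comments above reference the box encoding from Eq. 2 of the SSD paper. Below is a self-contained sketch of that center/size encoding; the scale factors shown are illustrative, and the actual behaviour depends on the box coder selected by ssd_config.box_coder.

import numpy as np

def _center_size(box):
    # box is (ymin, xmin, ymax, xmax); returns (ycenter, xcenter, height, width).
    ymin, xmin, ymax, xmax = box
    return (ymin + ymax) / 2.0, (xmin + xmax) / 2.0, ymax - ymin, xmax - xmin

def encode_box(gt_box, anchor, scale_factors=(10.0, 10.0, 5.0, 5.0)):
    # Encode a ground-truth box relative to an anchor as (ty, tx, th, tw);
    # these encoded targets are what the box predictions are regressed against.
    ya, xa, ha, wa = _center_size(anchor)
    yg, xg, hg, wg = _center_size(gt_box)
    targets = np.array([(yg - ya) / ha,
                        (xg - xa) / wa,
                        np.log(hg / ha),
                        np.log(wg / wa)])
    return targets * np.array(scale_factors)

print(encode_box(gt_box=(0.1, 0.1, 0.5, 0.5), anchor=(0.0, 0.0, 0.4, 0.4)))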
Exemple #35
0
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries, **kwargs):
    """Builds a Faster R-CNN or R-FCN detection model based on the model config.

    Builds R-FCN model if the second_stage_box_predictor in the config is of type
    `rfcn_box_predictor` else builds a Faster R-CNN model.

    Args:
      frcnn_config: A faster_rcnn.proto object containing the config for the
        desired FasterRCNNMetaArch or RFCNMetaArch.
      is_training: True if this model is being built for training purposes.
      add_summaries: Whether to add tf summaries in the model.
      kwargs: Additional keyword arguments:
              'rpn_type': the type of RPN, one of 'cascade_rpn', 'orign_rpn'
                  or 'without_rpn'; the latter requires external boxes to
                  replace the proposals that the RPN would normally generate.
              'filter_fn_arg': arguments for the filter function, which needs
                  a set of boxes to filter the proposals against.
              'replace_rpn_arg': a dictionary, used only when
                  rpn_type == 'without_rpn', that controls how the RPN
                  proposals are replaced with the (possibly adjusted) ground
                  truth:
                   'type': a string, either 'gt' or 'others'.
                   'scale': a float used to scale the boxes (e.g. the gt boxes).

    Returns:
      FasterRCNNMetaArch based on the config.

    Raises:
      ValueError: If frcnn_config.type is not recognized (i.e. not registered in
        model_class_map).
    """
    num_classes = frcnn_config.num_classes
    image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

    feature_extractor = _build_faster_rcnn_feature_extractor(
        frcnn_config.feature_extractor, is_training,
        inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)

    number_of_stages = frcnn_config.number_of_stages
    first_stage_anchor_generator = anchor_generator_builder.build(
        frcnn_config.first_stage_anchor_generator)

    first_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'proposal',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
    first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
    first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
        frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
    first_stage_box_predictor_kernel_size = (
        frcnn_config.first_stage_box_predictor_kernel_size)
    first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
    first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
    use_static_shapes = frcnn_config.use_static_shapes and (
            frcnn_config.use_static_shapes_for_eval or is_training)
    first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
        is_static=(frcnn_config.use_static_balanced_label_sampler and
                   use_static_shapes))
    first_stage_max_proposals = frcnn_config.first_stage_max_proposals
    if (frcnn_config.first_stage_nms_iou_threshold < 0 or
            frcnn_config.first_stage_nms_iou_threshold > 1.0):
        raise ValueError('iou_threshold not in [0, 1.0].')
    if (is_training and frcnn_config.second_stage_batch_size >
            first_stage_max_proposals):
        raise ValueError('second_stage_batch_size should be no greater than '
                         'first_stage_max_proposals.')
    first_stage_non_max_suppression_fn = functools.partial(
        post_processing.batch_multiclass_non_max_suppression,
        score_thresh=frcnn_config.first_stage_nms_score_threshold,
        iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
        max_size_per_class=frcnn_config.first_stage_max_proposals,
        max_total_size=frcnn_config.first_stage_max_proposals,
        use_static_shapes=use_static_shapes)
    first_stage_loc_loss_weight = (
        frcnn_config.first_stage_localization_loss_weight)
    first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

    initial_crop_size = frcnn_config.initial_crop_size
    maxpool_kernel_size = frcnn_config.maxpool_kernel_size
    maxpool_stride = frcnn_config.maxpool_stride

    second_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'detection',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
    second_stage_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build,
        frcnn_config.second_stage_box_predictor,
        is_training=is_training,
        num_classes=num_classes)
    second_stage_batch_size = frcnn_config.second_stage_batch_size
    second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.second_stage_balance_fraction,
        is_static=(frcnn_config.use_static_balanced_label_sampler and
                   use_static_shapes))
    (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
     ) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
    second_stage_localization_loss_weight = (
        frcnn_config.second_stage_localization_loss_weight)
    second_stage_classification_loss = (
        losses_builder.build_faster_rcnn_classification_loss(
            frcnn_config.second_stage_classification_loss))
    second_stage_classification_loss_weight = (
        frcnn_config.second_stage_classification_loss_weight)
    second_stage_mask_prediction_loss_weight = (
        frcnn_config.second_stage_mask_prediction_loss_weight)

    hard_example_miner = None
    if frcnn_config.HasField('hard_example_miner'):
        hard_example_miner = losses_builder.build_hard_example_miner(
            frcnn_config.hard_example_miner,
            second_stage_classification_loss_weight,
            second_stage_localization_loss_weight)

    crop_and_resize_fn = (
        ops.matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize
        else ops.native_crop_and_resize)
    clip_anchors_to_image = (
        frcnn_config.clip_anchors_to_image)

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'image_resizer_fn': image_resizer_fn,
        'feature_extractor': feature_extractor,
        'number_of_stages': number_of_stages,
        'first_stage_anchor_generator': first_stage_anchor_generator,
        'first_stage_target_assigner': first_stage_target_assigner,
        'first_stage_atrous_rate': first_stage_atrous_rate,
        'first_stage_box_predictor_arg_scope_fn':
            first_stage_box_predictor_arg_scope_fn,
        'first_stage_box_predictor_kernel_size':
            first_stage_box_predictor_kernel_size,
        'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
        'first_stage_minibatch_size': first_stage_minibatch_size,
        'first_stage_sampler': first_stage_sampler,
        'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn,
        'first_stage_max_proposals': first_stage_max_proposals,
        'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
        'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
        'second_stage_target_assigner': second_stage_target_assigner,
        'second_stage_batch_size': second_stage_batch_size,
        'second_stage_sampler': second_stage_sampler,
        'second_stage_non_max_suppression_fn':
            second_stage_non_max_suppression_fn,
        'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
        'second_stage_localization_loss_weight':
            second_stage_localization_loss_weight,
        'second_stage_classification_loss':
            second_stage_classification_loss,
        'second_stage_classification_loss_weight':
            second_stage_classification_loss_weight,
        'hard_example_miner': hard_example_miner,
        'add_summaries': add_summaries,
        'crop_and_resize_fn': crop_and_resize_fn,
        'clip_anchors_to_image': clip_anchors_to_image,
        'use_static_shapes': use_static_shapes,
        'resize_masks': frcnn_config.resize_masks
    }

    filter_fn_arg = kwargs.get('filter_fn_arg')
    if filter_fn_arg:
        filter_fn = functools.partial(filter_bbox, **filter_fn_arg)
        common_kwargs['filter_fn'] = filter_fn
    rpn_type = kwargs.get('rpn_type')
    if rpn_type:
        common_kwargs['rpn_type'] = rpn_type
    replace_rpn_arg = kwargs.get('replace_rpn_arg')
    if replace_rpn_arg:
        common_kwargs['replace_rpn_arg'] = replace_rpn_arg

    if isinstance(second_stage_box_predictor,
                  rfcn_box_predictor.RfcnBoxPredictor):
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
    else:
        return faster_rcnn_meta_arch.FasterRCNNMetaArch(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
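A hedged call sketch illustrating the **kwargs described in the docstring above; the keys mirror what the function reads with kwargs.get(...), while the concrete values (and the filter arguments in particular) are placeholders only. frcnn_config is assumed to be a parsed faster_rcnn config message.

model = _build_faster_rcnn_model(
    frcnn_config,
    is_training=True,
    add_summaries=True,
    rpn_type='without_rpn',
    # Hypothetical filter arguments; they are forwarded verbatim to filter_bbox
    # via functools.partial.
    filter_fn_arg={'min_area': 0.0},
    # Only consulted when rpn_type == 'without_rpn'.
    replace_rpn_arg={'type': 'gt', 'scale': 1.0})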
Exemple #36
0
def _build_ssd_model(ssd_config, is_training, add_summaries):
    """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.
  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(
        feature_extractor_config=ssd_config.feature_extractor,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        is_training=is_training)

    box_coder = box_coder_builder.build(ssd_config.box_coder)
    matcher = matcher_builder.build(ssd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    encode_background_as_zeros = ssd_config.encode_background_as_zeros
    negative_class_weight = ssd_config.negative_class_weight
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    if feature_extractor.is_keras_model:
        ssd_box_predictor = box_predictor_builder.build_keras(
            hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
            freeze_batchnorm=ssd_config.freeze_batchnorm,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=anchor_generator.
            num_anchors_per_location(),
            box_predictor_config=ssd_config.box_predictor,
            is_training=is_training,
            num_classes=num_classes,
            add_background_class=ssd_config.add_background_class)
    else:
        ssd_box_predictor = box_predictor_builder.build(
            hyperparams_builder.build, ssd_config.box_predictor, is_training,
            num_classes, ssd_config.add_background_class)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight, hard_example_miner, random_example_sampler,
     expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
    normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

    equalization_loss_config = ops.EqualizationLossConfig(
        weight=ssd_config.loss.equalization_loss.weight,
        exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)

    target_assigner_instance = target_assigner.TargetAssigner(
        region_similarity_calculator,
        matcher,
        box_coder,
        negative_class_weight=negative_class_weight)

    ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
    kwargs = {}

    return ssd_meta_arch_fn(
        is_training=is_training,
        anchor_generator=anchor_generator,
        box_predictor=ssd_box_predictor,
        box_coder=box_coder,
        feature_extractor=feature_extractor,
        encode_background_as_zeros=encode_background_as_zeros,
        image_resizer_fn=image_resizer_fn,
        non_max_suppression_fn=non_max_suppression_fn,
        score_conversion_fn=score_conversion_fn,
        classification_loss=classification_loss,
        localization_loss=localization_loss,
        classification_loss_weight=classification_weight,
        localization_loss_weight=localization_weight,
        normalize_loss_by_num_matches=normalize_loss_by_num_matches,
        hard_example_miner=hard_example_miner,
        target_assigner_instance=target_assigner_instance,
        add_summaries=add_summaries,
        normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
        add_background_class=ssd_config.add_background_class,
        explicit_background_class=ssd_config.explicit_background_class,
        random_example_sampler=random_example_sampler,
        expected_loss_weights_fn=expected_loss_weights_fn,
        use_confidences_as_targets=ssd_config.use_confidences_as_targets,
        implicit_example_weight=ssd_config.implicit_example_weight,
        equalization_loss_config=equalization_loss_config,
        return_raw_detections_during_predict=(
            ssd_config.return_raw_detections_during_predict),
        **kwargs)
Exemple #37
0
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
  """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.
  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = frcnn_config.num_classes
  image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

  feature_extractor = _build_faster_rcnn_feature_extractor(
      frcnn_config.feature_extractor, is_training)

  number_of_stages = frcnn_config.number_of_stages
  first_stage_anchor_generator = anchor_generator_builder.build(
      frcnn_config.first_stage_anchor_generator)

  first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
  first_stage_box_predictor_arg_scope = hyperparams_builder.build(
      frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
  first_stage_box_predictor_kernel_size = (
      frcnn_config.first_stage_box_predictor_kernel_size)
  first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
  first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
  first_stage_positive_balance_fraction = (
      frcnn_config.first_stage_positive_balance_fraction)
  first_stage_nms_score_threshold = frcnn_config.first_stage_nms_score_threshold
  first_stage_nms_iou_threshold = frcnn_config.first_stage_nms_iou_threshold
  first_stage_max_proposals = frcnn_config.first_stage_max_proposals
  first_stage_loc_loss_weight = (
      frcnn_config.first_stage_localization_loss_weight)
  first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

  initial_crop_size = frcnn_config.initial_crop_size
  maxpool_kernel_size = frcnn_config.maxpool_kernel_size
  maxpool_stride = frcnn_config.maxpool_stride

  second_stage_box_predictor = box_predictor_builder.build(
      hyperparams_builder.build,
      frcnn_config.second_stage_box_predictor,
      is_training=is_training,
      num_classes=num_classes)
  second_stage_batch_size = frcnn_config.second_stage_batch_size
  second_stage_balance_fraction = frcnn_config.second_stage_balance_fraction
  (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
  ) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
  second_stage_localization_loss_weight = (
      frcnn_config.second_stage_localization_loss_weight)
  second_stage_classification_loss = (
      losses_builder.build_faster_rcnn_classification_loss(
          frcnn_config.second_stage_classification_loss))
  second_stage_classification_loss_weight = (
      frcnn_config.second_stage_classification_loss_weight)
  second_stage_mask_prediction_loss_weight = (
      frcnn_config.second_stage_mask_prediction_loss_weight)

  hard_example_miner = None
  if frcnn_config.HasField('hard_example_miner'):
    hard_example_miner = losses_builder.build_hard_example_miner(
        frcnn_config.hard_example_miner,
        second_stage_classification_loss_weight,
        second_stage_localization_loss_weight)

  common_kwargs = {
      'is_training': is_training,
      'num_classes': num_classes,
      'image_resizer_fn': image_resizer_fn,
      'feature_extractor': feature_extractor,
      'number_of_stages': number_of_stages,
      'first_stage_anchor_generator': first_stage_anchor_generator,
      'first_stage_atrous_rate': first_stage_atrous_rate,
      'first_stage_box_predictor_arg_scope':
      first_stage_box_predictor_arg_scope,
      'first_stage_box_predictor_kernel_size':
      first_stage_box_predictor_kernel_size,
      'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
      'first_stage_minibatch_size': first_stage_minibatch_size,
      'first_stage_positive_balance_fraction':
      first_stage_positive_balance_fraction,
      'first_stage_nms_score_threshold': first_stage_nms_score_threshold,
      'first_stage_nms_iou_threshold': first_stage_nms_iou_threshold,
      'first_stage_max_proposals': first_stage_max_proposals,
      'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
      'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
      'second_stage_batch_size': second_stage_batch_size,
      'second_stage_balance_fraction': second_stage_balance_fraction,
      'second_stage_non_max_suppression_fn':
      second_stage_non_max_suppression_fn,
      'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
      'second_stage_localization_loss_weight':
      second_stage_localization_loss_weight,
      'second_stage_classification_loss':
      second_stage_classification_loss,
      'second_stage_classification_loss_weight':
      second_stage_classification_loss_weight,
      'hard_example_miner': hard_example_miner,
      'add_summaries': add_summaries}

  if isinstance(second_stage_box_predictor, box_predictor.RfcnBoxPredictor):
    return rfcn_meta_arch.RFCNMetaArch(
        second_stage_rfcn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
  else:
    return faster_rcnn_meta_arch.FasterRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        second_stage_mask_prediction_loss_weight=(
            second_stage_mask_prediction_loss_weight),
        **common_kwargs)
Exemple #38
0
def _build_ssd_model(ssd_config, is_training, add_summaries):
  """Builds an SSD detection model based on the model config."""
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(
      feature_extractor_config=ssd_config.feature_extractor,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      is_training=is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  if feature_extractor.is_keras_model:
    ssd_box_predictor = box_predictor_builder.build_keras(
        hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=False,
        num_predictions_per_location_list=anchor_generator
        .num_anchors_per_location(),
        box_predictor_config=ssd_config.box_predictor,
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=ssd_config.add_background_class)
  else:
    ssd_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build, ssd_config.box_predictor, is_training,
        num_classes, ssd_config.add_background_class)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, hard_example_miner, random_example_sampler,
   expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

  equalization_loss_config = ops.EqualizationLossConfig(
      weight=ssd_config.loss.equalization_loss.weight,
      exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)

  target_assigner_instance = target_assigner.TargetAssigner(
      region_similarity_calculator,
      matcher,
      box_coder,
      negative_class_weight=negative_class_weight)

  ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
  kwargs = {}

  return ssd_meta_arch_fn(
      is_training=is_training,
      anchor_generator=anchor_generator,
      box_predictor=ssd_box_predictor,
      box_coder=box_coder,
      feature_extractor=feature_extractor,
      encode_background_as_zeros=encode_background_as_zeros,
      image_resizer_fn=image_resizer_fn,
      non_max_suppression_fn=non_max_suppression_fn,
      score_conversion_fn=score_conversion_fn,
      classification_loss=classification_loss,
      localization_loss=localization_loss,
      classification_loss_weight=classification_weight,
      localization_loss_weight=localization_weight,
      normalize_loss_by_num_matches=normalize_loss_by_num_matches,
      hard_example_miner=hard_example_miner,
      target_assigner_instance=target_assigner_instance,
      add_summaries=add_summaries,
      normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
      add_background_class=ssd_config.add_background_class,
      explicit_background_class=ssd_config.explicit_background_class,
      random_example_sampler=random_example_sampler,
      expected_loss_weights_fn=expected_loss_weights_fn,
      use_confidences_as_targets=ssd_config.use_confidences_as_targets,
      implicit_example_weight=ssd_config.implicit_example_weight,
      equalization_loss_config=equalization_loss_config,
      **kwargs)
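The equalization-loss fields read above (weight, exclude_prefixes) come from the loss block of the SSD config. A hedged sketch of what that block might look like in a pipeline text proto follows; the loss types and the prefix string are placeholders, only the equalization_loss fields are taken from the code above.

loss_text_proto = """
  loss {
    classification_loss { weighted_sigmoid {} }
    localization_loss { weighted_smooth_l1 {} }
    equalization_loss {
      weight: 1.0
      exclude_prefixes: "BoxPredictor"
    }
  }
"""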
Exemple #39
0
def _build_lstm_model(ssd_config, lstm_config, is_training):
  """Builds an LSTM detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      LSTMMetaArch.
    lstm_config: LstmModel config proto that specifies LSTM train/eval configs.
    is_training: True if this model is being built for training purposes.

  Returns:
    LSTMMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map), or if lstm_config.interleave_strategy is not recognized.
    ValueError: If unroll_length is not specified in the config file.
  """
  feature_extractor = _build_lstm_feature_extractor(
      ssd_config.feature_extractor, is_training, lstm_config.lstm_state_depth)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)

  num_classes = ssd_config.num_classes
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)
  anchor_generator = anchor_generator_builder.build(ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, miner, _, _) = losses_builder.build(ssd_config.loss)

  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight

  # Extra configs for lstm unroll length.
  unroll_length = None
  if 'lstm' in ssd_config.feature_extractor.type:
    if is_training:
      unroll_length = lstm_config.train_unroll_length
    else:
      unroll_length = lstm_config.eval_unroll_length
  if unroll_length is None:
    raise ValueError('No unroll length found in the config file')

  target_assigner_instance = target_assigner.TargetAssigner(
      region_similarity_calculator,
      matcher,
      box_coder,
      negative_class_weight=negative_class_weight)

  lstm_model = lstm_meta_arch.LSTMMetaArch(
      is_training=is_training,
      anchor_generator=anchor_generator,
      box_predictor=ssd_box_predictor,
      box_coder=box_coder,
      feature_extractor=feature_extractor,
      encode_background_as_zeros=encode_background_as_zeros,
      image_resizer_fn=image_resizer_fn,
      non_max_suppression_fn=non_max_suppression_fn,
      score_conversion_fn=score_conversion_fn,
      classification_loss=classification_loss,
      localization_loss=localization_loss,
      classification_loss_weight=classification_weight,
      localization_loss_weight=localization_weight,
      normalize_loss_by_num_matches=normalize_loss_by_num_matches,
      hard_example_miner=miner,
      unroll_length=unroll_length,
      target_assigner_instance=target_assigner_instance)

  return lstm_model
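The unroll length above is taken from lstm_config, which must provide a train and an eval value (and, for the feature extractor, lstm_state_depth). A hedged sketch of such a config as a text proto; the numbers are placeholders.

lstm_config_text_proto = """
  lstm_state_depth: 256
  train_unroll_length: 4
  eval_unroll_length: 4
"""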
Exemple #40
0
def _build_ssd_model(ssd_config,
                     is_training,
                     add_summaries,
                     add_background_class=True):
    """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.
    add_background_class: Whether to add an implicit background class to one-hot
      encodings of groundtruth labels. Set to false if using groundtruth labels
      with an explicit background class or using multiclass scores instead of
      truth in the case of distillation.
  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(
        feature_extractor_config=ssd_config.feature_extractor,
        is_training=is_training)

    box_coder = box_coder_builder.build(ssd_config.box_coder)
    matcher = matcher_builder.build(ssd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    encode_background_as_zeros = ssd_config.encode_background_as_zeros
    negative_class_weight = ssd_config.negative_class_weight
    ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                    ssd_config.box_predictor,
                                                    is_training, num_classes)
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight, hard_example_miner,
     random_example_sampler) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
    normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize
    weight_regression_loss_by_score = (
        ssd_config.weight_regression_loss_by_score)

    target_assigner_instance = target_assigner.TargetAssigner(
        region_similarity_calculator,
        matcher,
        box_coder,
        negative_class_weight=negative_class_weight,
        weight_regression_loss_by_score=weight_regression_loss_by_score)

    expected_classification_loss_under_sampling = None
    if ssd_config.use_expected_classification_loss_under_sampling:
        expected_classification_loss_under_sampling = functools.partial(
            ops.expected_classification_loss_under_sampling,
            minimum_negative_sampling=ssd_config.minimum_negative_sampling,
            desired_negative_sampling_ratio=ssd_config.
            desired_negative_sampling_ratio)

    ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
    # BEGIN GOOGLE-INTERNAL
    # TODO(lzc): move ssd_mask_meta_arch to third party when it has decent
    # performance relative to a comparable Mask R-CNN model (b/112561592).
    predictor_config = ssd_config.box_predictor
    predict_instance_masks = False
    if predictor_config.WhichOneof(
            'box_predictor_oneof') == 'convolutional_box_predictor':
        predict_instance_masks = (
            predictor_config.convolutional_box_predictor.HasField('mask_head'))
    elif predictor_config.WhichOneof(
            'box_predictor_oneof'
    ) == 'weight_shared_convolutional_box_predictor':
        predict_instance_masks = (
            predictor_config.weight_shared_convolutional_box_predictor.
            HasField('mask_head'))
    if predict_instance_masks:
        ssd_meta_arch_fn = ssd_mask_meta_arch.SSDMaskMetaArch
    # END GOOGLE-INTERNAL

    return ssd_meta_arch_fn(
        is_training=is_training,
        anchor_generator=anchor_generator,
        box_predictor=ssd_box_predictor,
        box_coder=box_coder,
        feature_extractor=feature_extractor,
        encode_background_as_zeros=encode_background_as_zeros,
        image_resizer_fn=image_resizer_fn,
        non_max_suppression_fn=non_max_suppression_fn,
        score_conversion_fn=score_conversion_fn,
        classification_loss=classification_loss,
        localization_loss=localization_loss,
        classification_loss_weight=classification_weight,
        localization_loss_weight=localization_weight,
        normalize_loss_by_num_matches=normalize_loss_by_num_matches,
        hard_example_miner=hard_example_miner,
        target_assigner_instance=target_assigner_instance,
        add_summaries=add_summaries,
        normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
        add_background_class=add_background_class,
        random_example_sampler=random_example_sampler,
        expected_classification_loss_under_sampling=
        expected_classification_loss_under_sampling)
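The expected-classification-loss hook above is created with functools.partial, which pre-binds keyword arguments so the resulting callable only needs the remaining inputs later. A minimal, self-contained illustration with a stand-in function:

import functools

def weighted_sum(x, weight, offset):
    # Stand-in for a loss function whose hyperparameters are bound up front.
    return x * weight + offset

loss_fn = functools.partial(weighted_sum, weight=3.0, offset=1.0)
print(loss_fn(2.0))  # 7.0 == weighted_sum(2.0, weight=3.0, offset=1.0)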
Exemple #41
0
def _build_faster_rcnn_model(frcnn_config, is_training, mtl=None):
    """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    mtl: A multi-task learning config enabling the optional window, closeness,
      edgemask and refiner heads.

  Returns:
    FasterRCNNMetaArch based on the config.
  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = frcnn_config.num_classes
    image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

    feature_extractor_kwargs = {}
    feature_extractor_kwargs[
        'freeze_layer'] = frcnn_config.feature_extractor.freeze_layer
    feature_extractor_kwargs[
        'batch_norm_trainable'] = frcnn_config.feature_extractor.batch_norm_trainable

    if frcnn_config.feature_extractor.HasField('weight_decay'):
        feature_extractor_kwargs['weight_decay'] = \
            frcnn_config.feature_extractor.weight_decay
    feature_extractor = _build_faster_rcnn_feature_extractor(
        frcnn_config.feature_extractor,
        is_training and frcnn_config.feature_extractor.trainable,
        reuse_weights=tf.AUTO_REUSE,
        **feature_extractor_kwargs)

    first_stage_only = frcnn_config.first_stage_only
    first_stage_anchor_generator = anchor_generator_builder.build(
        frcnn_config.first_stage_anchor_generator)

    first_stage_clip_window = frcnn_config.first_stage_clip_window
    first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
    first_stage_box_predictor_trainable = \
        frcnn_config.first_stage_box_predictor_trainable
    first_stage_box_predictor_arg_scope = hyperparams_builder.build(
        frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
    first_stage_box_predictor_kernel_size = (
        frcnn_config.first_stage_box_predictor_kernel_size)
    first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
    first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
    first_stage_positive_balance_fraction = (
        frcnn_config.first_stage_positive_balance_fraction)
    first_stage_nms_score_threshold = frcnn_config.first_stage_nms_score_threshold
    first_stage_nms_iou_threshold = frcnn_config.first_stage_nms_iou_threshold
    first_stage_max_proposals = frcnn_config.first_stage_max_proposals
    first_stage_loc_loss_weight = (
        frcnn_config.first_stage_localization_loss_weight)
    first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

    initial_crop_size = frcnn_config.initial_crop_size
    maxpool_kernel_size = frcnn_config.maxpool_kernel_size
    maxpool_stride = frcnn_config.maxpool_stride

    second_stage_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build,
        frcnn_config.second_stage_box_predictor,
        is_training=is_training
        and frcnn_config.second_stage_box_predictor.trainable,
        num_classes=num_classes,
        reuse_weights=tf.AUTO_REUSE)
    second_stage_batch_size = frcnn_config.second_stage_batch_size
    second_stage_balance_fraction = frcnn_config.second_stage_balance_fraction
    (second_stage_non_max_suppression_fn,
     second_stage_score_conversion_fn) = post_processing_builder.build(
         frcnn_config.second_stage_post_processing)
    second_stage_localization_loss_weight = (
        frcnn_config.second_stage_localization_loss_weight)
    second_stage_classification_loss_weight = (
        frcnn_config.second_stage_classification_loss_weight)

    if mtl.window:
        window_box_predictor = box_predictor_builder.build(
            hyperparams_builder.build,
            mtl.window_box_predictor,
            is_training=is_training and mtl.window_box_predictor.trainable,
            num_classes=num_classes + 1,
            reuse_weights=tf.AUTO_REUSE)
    else:
        window_box_predictor = second_stage_box_predictor

    if mtl.closeness:
        closeness_box_predictor = box_predictor_builder.build(
            hyperparams_builder.build,
            mtl.closeness_box_predictor,
            is_training=is_training and mtl.closeness_box_predictor.trainable,
            num_classes=num_classes + 1,
            reuse_weights=tf.AUTO_REUSE)
    else:
        closeness_box_predictor = second_stage_box_predictor

    if mtl.edgemask:
        edgemask_predictor = mask_predictor_builder.build(
            hyperparams_builder.build,
            mtl.edgemask_predictor,
            is_training=is_training and mtl.edgemask_predictor.trainable,
            num_classes=2,
            reuse_weights=tf.AUTO_REUSE,
            channels=1)
    else:
        edgemask_predictor = None

    mtl_refiner_arg_scope = None
    if mtl.refine:
        mtl_refiner_arg_scope = hyperparams_builder.build(
            mtl.refiner_fc_hyperparams, is_training)

    hard_example_miner = None
    if frcnn_config.HasField('hard_example_miner'):
        hard_example_miner = losses_builder.build_hard_example_miner(
            frcnn_config.hard_example_miner,
            second_stage_classification_loss_weight,
            second_stage_localization_loss_weight)

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'image_resizer_fn': image_resizer_fn,
        'feature_extractor': feature_extractor,
        'first_stage_only': first_stage_only,
        'first_stage_anchor_generator': first_stage_anchor_generator,
        'first_stage_clip_window': first_stage_clip_window,
        'first_stage_atrous_rate': first_stage_atrous_rate,
        'first_stage_box_predictor_trainable':
        first_stage_box_predictor_trainable,
        'first_stage_box_predictor_arg_scope':
        first_stage_box_predictor_arg_scope,
        'first_stage_box_predictor_kernel_size':
        first_stage_box_predictor_kernel_size,
        'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
        'first_stage_minibatch_size': first_stage_minibatch_size,
        'first_stage_positive_balance_fraction':
        first_stage_positive_balance_fraction,
        'first_stage_nms_score_threshold': first_stage_nms_score_threshold,
        'first_stage_nms_iou_threshold': first_stage_nms_iou_threshold,
        'first_stage_max_proposals': first_stage_max_proposals,
        'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
        'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
        'second_stage_batch_size': second_stage_batch_size,
        'second_stage_balance_fraction': second_stage_balance_fraction,
        'second_stage_non_max_suppression_fn':
        second_stage_non_max_suppression_fn,
        'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
        'second_stage_localization_loss_weight':
        second_stage_localization_loss_weight,
        'second_stage_classification_loss_weight':
        second_stage_classification_loss_weight,
        'hard_example_miner': hard_example_miner,
        'mtl': mtl,
        'mtl_refiner_arg_scope': mtl_refiner_arg_scope,
        'window_box_predictor': window_box_predictor,
        'closeness_box_predictor': closeness_box_predictor,
        'edgemask_predictor': edgemask_predictor
    }

    if isinstance(second_stage_box_predictor, box_predictor.RfcnBoxPredictor):
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
    else:
        return faster_rcnn_meta_arch.FasterRCNNMetaArch(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
Exemple #42
0
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
  """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = frcnn_config.num_classes
  image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

  feature_extractor = _build_faster_rcnn_feature_extractor(
      frcnn_config.feature_extractor, is_training,
      frcnn_config.inplace_batchnorm_update)

  number_of_stages = frcnn_config.number_of_stages
  first_stage_anchor_generator = anchor_generator_builder.build(
      frcnn_config.first_stage_anchor_generator)

  first_stage_target_assigner = target_assigner.create_target_assigner(
      'FasterRCNN',
      'proposal',
      use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
  first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
  first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
      frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
  first_stage_box_predictor_kernel_size = (
      frcnn_config.first_stage_box_predictor_kernel_size)
  first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
  first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
  use_static_shapes = frcnn_config.use_static_shapes
  first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
      positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
      is_static=(frcnn_config.use_static_balanced_label_sampler and
                 use_static_shapes))
  first_stage_max_proposals = frcnn_config.first_stage_max_proposals
  if (frcnn_config.first_stage_nms_iou_threshold < 0 or
      frcnn_config.first_stage_nms_iou_threshold > 1.0):
    raise ValueError('iou_threshold not in [0, 1.0].')
  if (is_training and frcnn_config.second_stage_batch_size >
      first_stage_max_proposals):
    raise ValueError('second_stage_batch_size should be no greater than '
                     'first_stage_max_proposals.')
  first_stage_non_max_suppression_fn = functools.partial(
      post_processing.batch_multiclass_non_max_suppression,
      score_thresh=frcnn_config.first_stage_nms_score_threshold,
      iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
      max_size_per_class=frcnn_config.first_stage_max_proposals,
      max_total_size=frcnn_config.first_stage_max_proposals,
      use_static_shapes=use_static_shapes)
  first_stage_loc_loss_weight = (
      frcnn_config.first_stage_localization_loss_weight)
  first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

  initial_crop_size = frcnn_config.initial_crop_size
  maxpool_kernel_size = frcnn_config.maxpool_kernel_size
  maxpool_stride = frcnn_config.maxpool_stride

  second_stage_target_assigner = target_assigner.create_target_assigner(
      'FasterRCNN',
      'detection',
      use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
  second_stage_box_predictor = box_predictor_builder.build(
      hyperparams_builder.build,
      frcnn_config.second_stage_box_predictor,
      is_training=is_training,
      num_classes=num_classes)
  second_stage_batch_size = frcnn_config.second_stage_batch_size
  second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
      positive_fraction=frcnn_config.second_stage_balance_fraction,
      is_static=(frcnn_config.use_static_balanced_label_sampler and
                 use_static_shapes))
  (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
  ) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
  second_stage_localization_loss_weight = (
      frcnn_config.second_stage_localization_loss_weight)
  second_stage_classification_loss = (
      losses_builder.build_faster_rcnn_classification_loss(
          frcnn_config.second_stage_classification_loss))
  second_stage_classification_loss_weight = (
      frcnn_config.second_stage_classification_loss_weight)
  second_stage_mask_prediction_loss_weight = (
      frcnn_config.second_stage_mask_prediction_loss_weight)

  hard_example_miner = None
  if frcnn_config.HasField('hard_example_miner'):
    hard_example_miner = losses_builder.build_hard_example_miner(
        frcnn_config.hard_example_miner,
        second_stage_classification_loss_weight,
        second_stage_localization_loss_weight)

  crop_and_resize_fn = (
      ops.matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize
      else ops.native_crop_and_resize)
  clip_anchors_to_image = (
      frcnn_config.clip_anchors_to_image)

  common_kwargs = {
      'is_training': is_training,
      'num_classes': num_classes,
      'image_resizer_fn': image_resizer_fn,
      'feature_extractor': feature_extractor,
      'number_of_stages': number_of_stages,
      'first_stage_anchor_generator': first_stage_anchor_generator,
      'first_stage_target_assigner': first_stage_target_assigner,
      'first_stage_atrous_rate': first_stage_atrous_rate,
      'first_stage_box_predictor_arg_scope_fn':
      first_stage_box_predictor_arg_scope_fn,
      'first_stage_box_predictor_kernel_size':
      first_stage_box_predictor_kernel_size,
      'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
      'first_stage_minibatch_size': first_stage_minibatch_size,
      'first_stage_sampler': first_stage_sampler,
      'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn,
      'first_stage_max_proposals': first_stage_max_proposals,
      'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
      'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
      'second_stage_target_assigner': second_stage_target_assigner,
      'second_stage_batch_size': second_stage_batch_size,
      'second_stage_sampler': second_stage_sampler,
      'second_stage_non_max_suppression_fn':
      second_stage_non_max_suppression_fn,
      'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
      'second_stage_localization_loss_weight':
      second_stage_localization_loss_weight,
      'second_stage_classification_loss':
      second_stage_classification_loss,
      'second_stage_classification_loss_weight':
      second_stage_classification_loss_weight,
      'hard_example_miner': hard_example_miner,
      'add_summaries': add_summaries,
      'crop_and_resize_fn': crop_and_resize_fn,
      'clip_anchors_to_image': clip_anchors_to_image,
      'use_static_shapes': use_static_shapes,
      'resize_masks': frcnn_config.resize_masks
  }

  if isinstance(second_stage_box_predictor,
                rfcn_box_predictor.RfcnBoxPredictor):
    return rfcn_meta_arch.RFCNMetaArch(
        second_stage_rfcn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
  else:
    return faster_rcnn_meta_arch.FasterRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        second_stage_mask_prediction_loss_weight=(
            second_stage_mask_prediction_loss_weight),
        **common_kwargs)
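The first-stage non-max-suppression function near the top of this example is produced by pre-binding the config's thresholds onto post_processing.batch_multiclass_non_max_suppression with functools.partial, so the meta-architecture can later call it with only the per-image tensors. Below is a minimal, self-contained sketch of that pattern; toy_nms is a made-up stand-in, not the Object Detection API's implementation.

import functools

# Toy stand-in for an NMS-style function: config values (score_thresh,
# max_total_size) are bound once at build time, and the resulting callable
# is later invoked with only the per-batch argument.
def toy_nms(scores, score_thresh, max_total_size):
  kept = sorted([s for s in scores if s >= score_thresh], reverse=True)
  return kept[:max_total_size]

nms_fn = functools.partial(toy_nms, score_thresh=0.5, max_total_size=3)
print(nms_fn([0.9, 0.2, 0.75, 0.6, 0.55]))  # [0.9, 0.75, 0.6]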
def _build_ssd_model(ssd_config, is_training):
    """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.

  Returns:
    SSDMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = ssd_config.num_classes

    # Feature extractor
    feature_extractor = _build_ssd_feature_extractor(
        ssd_config.feature_extractor, is_training)

    box_coder = box_coder_builder.build(ssd_config.box_coder)
    matcher = matcher_builder.build(ssd_config.matcher)
    region_similarity_calculator = sim_calc.build(
        ssd_config.similarity_calculator)
    ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                    ssd_config.box_predictor,
                                                    is_training, num_classes)
    anchor_generator = anchor_generator_builder.build(
        ssd_config.anchor_generator)
    image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
    non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
        ssd_config.post_processing)
    (classification_loss, localization_loss, classification_weight,
     localization_weight,
     hard_example_miner) = losses_builder.build(ssd_config.loss)
    normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches

    common_kwargs = {
        'is_training': is_training,
        'anchor_generator': anchor_generator,
        'box_predictor': ssd_box_predictor,
        'box_coder': box_coder,
        'feature_extractor': feature_extractor,
        'matcher': matcher,
        'region_similarity_calculator': region_similarity_calculator,
        'image_resizer_fn': image_resizer_fn,
        'non_max_suppression_fn': non_max_suppression_fn,
        'score_conversion_fn': score_conversion_fn,
        'classification_loss': classification_loss,
        'localization_loss': localization_loss,
        'classification_loss_weight': classification_weight,
        'localization_loss_weight': localization_weight,
        'normalize_loss_by_num_matches': normalize_loss_by_num_matches,
        'hard_example_miner': hard_example_miner
    }

    if isinstance(anchor_generator,
                  yolo_grid_anchor_generator.YoloGridAnchorGenerator):
        return yolo_meta_arch.YOLOMetaArch(**common_kwargs)
    else:
        return ssd_meta_arch.SSDMetaArch(**common_kwargs)
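Builders like the one above are normally driven from a text-format DetectionModel proto whose oneof selects the meta-architecture. The sketch below assumes the object_detection protos are importable; the tiny config is illustrative only and omits the feature extractor, box coder, matcher, and other fields a real build would require, so it stops short of calling the builder.

from google.protobuf import text_format
from object_detection.protos import model_pb2

model_text_proto = """
  ssd {
    num_classes: 3
  }
"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)

# The populated oneof field tells us which builder applies.
assert model_proto.WhichOneof('model') == 'ssd'
# With a complete config, the sub-message would then be handed to the builder
# defined above, e.g. _build_ssd_model(model_proto.ssd, is_training=True).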
Example #44
def _build_ssd_model(ssd_config, is_training, add_summaries):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.
  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(
      feature_extractor_config=ssd_config.feature_extractor,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      is_training=is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  if feature_extractor.is_keras_model:
    ssd_box_predictor = box_predictor_builder.build_keras(
        conv_hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=False,
        num_predictions_per_location_list=anchor_generator
        .num_anchors_per_location(),
        box_predictor_config=ssd_config.box_predictor,
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=ssd_config.add_background_class)
  else:
    ssd_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build, ssd_config.box_predictor, is_training,
        num_classes, ssd_config.add_background_class)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, hard_example_miner,
   random_example_sampler) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize
  weight_regression_loss_by_score = (ssd_config.weight_regression_loss_by_score)

  target_assigner_instance = target_assigner.TargetAssigner(
      region_similarity_calculator,
      matcher,
      box_coder,
      negative_class_weight=negative_class_weight,
      weight_regression_loss_by_score=weight_regression_loss_by_score)

  expected_classification_loss_under_sampling = None
  if ssd_config.use_expected_classification_loss_under_sampling:
    expected_classification_loss_under_sampling = functools.partial(
        ops.expected_classification_loss_under_sampling,
        min_num_negative_samples=ssd_config.min_num_negative_samples,
        desired_negative_sampling_ratio=ssd_config.
        desired_negative_sampling_ratio)

  ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch

  return ssd_meta_arch_fn(
      is_training=is_training,
      anchor_generator=anchor_generator,
      box_predictor=ssd_box_predictor,
      box_coder=box_coder,
      feature_extractor=feature_extractor,
      encode_background_as_zeros=encode_background_as_zeros,
      image_resizer_fn=image_resizer_fn,
      non_max_suppression_fn=non_max_suppression_fn,
      score_conversion_fn=score_conversion_fn,
      classification_loss=classification_loss,
      localization_loss=localization_loss,
      classification_loss_weight=classification_weight,
      localization_loss_weight=localization_weight,
      normalize_loss_by_num_matches=normalize_loss_by_num_matches,
      hard_example_miner=hard_example_miner,
      target_assigner_instance=target_assigner_instance,
      add_summaries=add_summaries,
      normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
      add_background_class=ssd_config.add_background_class,
      random_example_sampler=random_example_sampler,
      expected_classification_loss_under_sampling=
      expected_classification_loss_under_sampling)
Example #45
def _build_lstm_model(ssd_config, lstm_config, is_training):
  """Builds an LSTM detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      LSTMSSDMetaArch.
    lstm_config: LstmModel config proto that specifies LSTM train/eval configs.
    is_training: True if this model is being built for training purposes.

  Returns:
    LSTMSSDMetaArch based on the config.
  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map), or if lstm_config.interleave_strategy is not recognized.
    ValueError: If unroll_length is not specified in the config file.
  """
  feature_extractor = _build_lstm_feature_extractor(
      ssd_config.feature_extractor, is_training, lstm_config)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)

  num_classes = ssd_config.num_classes
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)
  anchor_generator = anchor_generator_builder.build(ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, miner, _, _) = losses_builder.build(ssd_config.loss)

  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight

  # Extra configs for lstm unroll length.
  unroll_length = None
  if 'lstm' in ssd_config.feature_extractor.type:
    if is_training:
      unroll_length = lstm_config.train_unroll_length
    else:
      unroll_length = lstm_config.eval_unroll_length
  if unroll_length is None:
    raise ValueError('No unroll length found in the config file')

  target_assigner_instance = target_assigner.TargetAssigner(
      region_similarity_calculator,
      matcher,
      box_coder,
      negative_class_weight=negative_class_weight)

  lstm_model = lstm_ssd_meta_arch.LSTMSSDMetaArch(
      is_training=is_training,
      anchor_generator=anchor_generator,
      box_predictor=ssd_box_predictor,
      box_coder=box_coder,
      feature_extractor=feature_extractor,
      encode_background_as_zeros=encode_background_as_zeros,
      image_resizer_fn=image_resizer_fn,
      non_max_suppression_fn=non_max_suppression_fn,
      score_conversion_fn=score_conversion_fn,
      classification_loss=classification_loss,
      localization_loss=localization_loss,
      classification_loss_weight=classification_weight,
      localization_loss_weight=localization_weight,
      normalize_loss_by_num_matches=normalize_loss_by_num_matches,
      hard_example_miner=miner,
      unroll_length=unroll_length,
      target_assigner_instance=target_assigner_instance)

  return lstm_model
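The unroll-length selection in this example simply switches between the train and eval values and fails fast when neither applies. The snippet below isolates that logic with a namedtuple standing in for the real LstmModel config proto, purely for illustration.

from collections import namedtuple

# Stand-in for the LstmModel config; only the two fields used above.
FakeLstmConfig = namedtuple('FakeLstmConfig',
                            ['train_unroll_length', 'eval_unroll_length'])

def select_unroll_length(lstm_config, is_training):
  # Mirrors the train/eval selection in _build_lstm_model.
  return (lstm_config.train_unroll_length if is_training
          else lstm_config.eval_unroll_length)

cfg = FakeLstmConfig(train_unroll_length=4, eval_unroll_length=8)
assert select_unroll_length(cfg, is_training=True) == 4
assert select_unroll_length(cfg, is_training=False) == 8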
Example #46
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
    """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of type
  `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
    num_classes = frcnn_config.num_classes
    image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

    is_keras = (frcnn_config.feature_extractor.type
                in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP)

    if is_keras:
        feature_extractor = _build_faster_rcnn_keras_feature_extractor(
            frcnn_config.feature_extractor,
            is_training,
            inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
    else:
        feature_extractor = _build_faster_rcnn_feature_extractor(
            frcnn_config.feature_extractor,
            is_training,
            inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)

    number_of_stages = frcnn_config.number_of_stages
    first_stage_anchor_generator = anchor_generator_builder.build(
        frcnn_config.first_stage_anchor_generator)

    first_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'proposal',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
    first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
    if is_keras:
        first_stage_box_predictor_arg_scope_fn = (
            hyperparams_builder.KerasLayerHyperparams(
                frcnn_config.first_stage_box_predictor_conv_hyperparams))
    else:
        first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
            frcnn_config.first_stage_box_predictor_conv_hyperparams,
            is_training)
    first_stage_box_predictor_kernel_size = (
        frcnn_config.first_stage_box_predictor_kernel_size)
    first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
    first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
    use_static_shapes = frcnn_config.use_static_shapes and (
        frcnn_config.use_static_shapes_for_eval or is_training)
    first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
        is_static=(frcnn_config.use_static_balanced_label_sampler
                   and use_static_shapes))
    first_stage_max_proposals = frcnn_config.first_stage_max_proposals
    if (frcnn_config.first_stage_nms_iou_threshold < 0
            or frcnn_config.first_stage_nms_iou_threshold > 1.0):
        raise ValueError('iou_threshold not in [0, 1.0].')
    if (is_training and
            frcnn_config.second_stage_batch_size > first_stage_max_proposals):
        raise ValueError('second_stage_batch_size should be no greater than '
                         'first_stage_max_proposals.')
    first_stage_non_max_suppression_fn = functools.partial(
        post_processing.batch_multiclass_non_max_suppression,
        score_thresh=frcnn_config.first_stage_nms_score_threshold,
        iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
        max_size_per_class=frcnn_config.first_stage_max_proposals,
        max_total_size=frcnn_config.first_stage_max_proposals,
        use_static_shapes=use_static_shapes,
        use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage,
        use_combined_nms=frcnn_config.use_combined_nms_in_first_stage)
    first_stage_loc_loss_weight = (
        frcnn_config.first_stage_localization_loss_weight)
    first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

    initial_crop_size = frcnn_config.initial_crop_size
    maxpool_kernel_size = frcnn_config.maxpool_kernel_size
    maxpool_stride = frcnn_config.maxpool_stride

    second_stage_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN',
        'detection',
        use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
    if is_keras:
        second_stage_box_predictor = box_predictor_builder.build_keras(
            hyperparams_builder.KerasLayerHyperparams,
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[1],
            box_predictor_config=frcnn_config.second_stage_box_predictor,
            is_training=is_training,
            num_classes=num_classes)
    else:
        second_stage_box_predictor = box_predictor_builder.build(
            hyperparams_builder.build,
            frcnn_config.second_stage_box_predictor,
            is_training=is_training,
            num_classes=num_classes)
    second_stage_batch_size = frcnn_config.second_stage_batch_size
    second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
        positive_fraction=frcnn_config.second_stage_balance_fraction,
        is_static=(frcnn_config.use_static_balanced_label_sampler
                   and use_static_shapes))
    (second_stage_non_max_suppression_fn,
     second_stage_score_conversion_fn) = post_processing_builder.build(
         frcnn_config.second_stage_post_processing)
    second_stage_localization_loss_weight = (
        frcnn_config.second_stage_localization_loss_weight)
    second_stage_classification_loss = (
        losses_builder.build_faster_rcnn_classification_loss(
            frcnn_config.second_stage_classification_loss))
    second_stage_classification_loss_weight = (
        frcnn_config.second_stage_classification_loss_weight)
    second_stage_mask_prediction_loss_weight = (
        frcnn_config.second_stage_mask_prediction_loss_weight)

    hard_example_miner = None
    if frcnn_config.HasField('hard_example_miner'):
        hard_example_miner = losses_builder.build_hard_example_miner(
            frcnn_config.hard_example_miner,
            second_stage_classification_loss_weight,
            second_stage_localization_loss_weight)

    crop_and_resize_fn = (ops.matmul_crop_and_resize
                          if frcnn_config.use_matmul_crop_and_resize else
                          ops.native_crop_and_resize)
    clip_anchors_to_image = (frcnn_config.clip_anchors_to_image)

    common_kwargs = {
        'is_training': is_training,
        'num_classes': num_classes,
        'image_resizer_fn': image_resizer_fn,
        'feature_extractor': feature_extractor,
        'number_of_stages': number_of_stages,
        'first_stage_anchor_generator': first_stage_anchor_generator,
        'first_stage_target_assigner': first_stage_target_assigner,
        'first_stage_atrous_rate': first_stage_atrous_rate,
        'first_stage_box_predictor_arg_scope_fn':
            first_stage_box_predictor_arg_scope_fn,
        'first_stage_box_predictor_kernel_size':
            first_stage_box_predictor_kernel_size,
        'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
        'first_stage_minibatch_size': first_stage_minibatch_size,
        'first_stage_sampler': first_stage_sampler,
        'first_stage_non_max_suppression_fn':
            first_stage_non_max_suppression_fn,
        'first_stage_max_proposals': first_stage_max_proposals,
        'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
        'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
        'second_stage_target_assigner': second_stage_target_assigner,
        'second_stage_batch_size': second_stage_batch_size,
        'second_stage_sampler': second_stage_sampler,
        'second_stage_non_max_suppression_fn':
            second_stage_non_max_suppression_fn,
        'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
        'second_stage_localization_loss_weight':
            second_stage_localization_loss_weight,
        'second_stage_classification_loss': second_stage_classification_loss,
        'second_stage_classification_loss_weight':
            second_stage_classification_loss_weight,
        'hard_example_miner': hard_example_miner,
        'add_summaries': add_summaries,
        'crop_and_resize_fn': crop_and_resize_fn,
        'clip_anchors_to_image': clip_anchors_to_image,
        'use_static_shapes': use_static_shapes,
        'resize_masks': frcnn_config.resize_masks,
        'return_raw_detections_during_predict':
            frcnn_config.return_raw_detections_during_predict
    }

    if (isinstance(second_stage_box_predictor,
                   rfcn_box_predictor.RfcnBoxPredictor)
            or isinstance(second_stage_box_predictor,
                          rfcn_keras_box_predictor.RfcnKerasBoxPredictor)):
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=second_stage_box_predictor,
            **common_kwargs)
    else:
        return faster_rcnn_meta_arch.FasterRCNNMetaArch(
            initial_crop_size=initial_crop_size,
            maxpool_kernel_size=maxpool_kernel_size,
            maxpool_stride=maxpool_stride,
            second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
            second_stage_mask_prediction_loss_weight=(
                second_stage_mask_prediction_loss_weight),
            **common_kwargs)
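Both return paths above build a meta-architecture from the same shared keyword arguments; gathering them in common_kwargs and unpacking with ** keeps the two constructor calls from duplicating a long argument list. A small generic sketch of the pattern, with toy classes rather than the real meta-architectures:

# Two constructors sharing one kwargs dict via ** unpacking.
class BaseDetector(object):
  def __init__(self, is_training, num_classes):
    self.is_training = is_training
    self.num_classes = num_classes

class MaskDetector(BaseDetector):
  def __init__(self, mask_loss_weight, **kwargs):
    super(MaskDetector, self).__init__(**kwargs)
    self.mask_loss_weight = mask_loss_weight

common_kwargs = {'is_training': True, 'num_classes': 90}
plain = BaseDetector(**common_kwargs)
masked = MaskDetector(mask_loss_weight=1.0, **common_kwargs)
assert masked.num_classes == plain.num_classes == 90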
Example #47
def _build_ssd_model(ssd_config, is_training, add_summaries,
                     add_background_class=True):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.
    add_background_class: Whether to add an implicit background class to one-hot
      encodings of groundtruth labels. Set to false if using groundtruth labels
      with an explicit background class or using multiclass scores instead of
      truth in the case of distillation.
  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(
      feature_extractor_config=ssd_config.feature_extractor,
      is_training=is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight
  ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build,
                                                  ssd_config.box_predictor,
                                                  is_training, num_classes)
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight,
   hard_example_miner) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

  return ssd_meta_arch.SSDMetaArch(
      is_training,
      anchor_generator,
      ssd_box_predictor,
      box_coder,
      feature_extractor,
      matcher,
      region_similarity_calculator,
      encode_background_as_zeros,
      negative_class_weight,
      image_resizer_fn,
      non_max_suppression_fn,
      score_conversion_fn,
      classification_loss,
      localization_loss,
      classification_weight,
      localization_weight,
      normalize_loss_by_num_matches,
      hard_example_miner,
      add_summaries=add_summaries,
      normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
      add_background_class=add_background_class)
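Each of the _build_*_model variants collected on this page is typically reached through one top-level dispatcher that inspects the config's oneof. The sketch below is an assumption about how such a dispatcher might look rather than the library's verbatim code; it reuses the builders from the examples above and the signatures shown in Examples #44 and #46.

def build(model_config, is_training, add_summaries=True):
  """Hypothetical top-level dispatcher over a DetectionModel config."""
  meta_architecture = model_config.WhichOneof('model')
  if meta_architecture == 'ssd':
    return _build_ssd_model(model_config.ssd, is_training, add_summaries)
  if meta_architecture == 'faster_rcnn':
    return _build_faster_rcnn_model(model_config.faster_rcnn, is_training,
                                    add_summaries)
  raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))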