Example #1
0
    def test_serialize_deserialize(self, level, low_level, shared_decoder):
        """Checks that the full model survives a config round-trip."""
        num_classes = 10
        resnet_backbone = backbones.ResNet(model_id=50)

        sem_decoder = aspp.ASPP(level=level, dilation_rates=[6, 12, 18])
        # Either reuse one decoder for both branches or build a second one.
        inst_decoder = (
            sem_decoder if shared_decoder
            else aspp.ASPP(level=level, dilation_rates=[6, 12, 18]))

        sem_head = panoptic_deeplab_heads.SemanticHead(
            num_classes,
            level=level,
            low_level=low_level,
            low_level_num_filters=(64, 32))
        inst_head = panoptic_deeplab_heads.InstanceHead(
            level=level,
            low_level=low_level,
            low_level_num_filters=(64, 32))

        merger = panoptic_deeplab_merge.PostProcessor(
            output_size=[640, 640],
            center_score_threshold=0.1,
            thing_class_ids=[1, 2, 3, 4],
            label_divisor=[256],
            stuff_area_limit=4096,
            ignore_label=0,
            nms_kernel=41,
            keep_k_centers=41,
            rescale_predictions=True)

        original = panoptic_deeplab_model.PanopticDeeplabModel(
            backbone=resnet_backbone,
            semantic_decoder=sem_decoder,
            instance_decoder=inst_decoder,
            semantic_head=sem_head,
            instance_head=inst_head,
            post_processor=merger)

        restored = panoptic_deeplab_model.PanopticDeeplabModel.from_config(
            original.get_config())

        # The restored model must also serialize to JSON without error.
        _ = restored.to_json()

        # A successful round-trip leaves the config unchanged.
        self.assertAllEqual(original.get_config(), restored.get_config())
Example #2
0
    def test_serialize_deserialize(self):
        """Checks both heads can be rebuilt from their configs without drift."""
        heads = (
            panoptic_deeplab_heads.SemanticHead(num_classes=2, level=3),
            panoptic_deeplab_heads.InstanceHead(level=3),
        )
        for head in heads:
            # Rebuild the head from its own config and compare configs.
            restored = type(head).from_config(head.get_config())
            self.assertAllEqual(head.get_config(), restored.get_config())
Example #3
0
    def test_forward(self, level, low_level, low_level_num_filters):
        """Runs both heads forward and validates their output shapes."""
        # Spatial size halves per level: level L -> 2**(10 - L) pixels.
        backbone_features = {
            str(lvl): np.random.rand(2, 2**(10 - lvl), 2**(10 - lvl), 16)
            for lvl in (2, 3, 4, 5)
        }
        decoder_features = {
            str(lvl): np.random.rand(2, 2**(10 - lvl), 2**(10 - lvl), 64)
            for lvl in (2, 3, 4, 5, 6)
        }

        num_classes = 10
        shared_kwargs = dict(
            level=level,
            low_level=low_level,
            low_level_num_filters=low_level_num_filters)
        semantic_head = panoptic_deeplab_heads.SemanticHead(
            num_classes=num_classes, **shared_kwargs)
        instance_head = panoptic_deeplab_heads.InstanceHead(**shared_kwargs)

        features = (backbone_features, decoder_features)
        semantic_outputs = semantic_head(features)
        instance_outputs = instance_head(features)

        if str(level) in decoder_features:
            # Outputs come out at the resolution of the finest low level.
            height, width = decoder_features[str(low_level[-1])].shape[1:3]
            self.assertAllEqual(semantic_outputs.numpy().shape,
                                [2, height, width, num_classes])
            self.assertAllEqual(
                instance_outputs['instance_centers_heatmap'].numpy().shape,
                [2, height, width, 1])
            self.assertAllEqual(
                instance_outputs['instance_centers_offset'].numpy().shape,
                [2, height, width, 2])
Example #4
0
def build_panoptic_deeplab(
    input_specs: tf.keras.layers.InputSpec,
    model_config: panoptic_deeplab_cfg.PanopticDeeplab,
    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
    """Builds a Panoptic Deeplab model from its configuration.

    Args:
      input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
      model_config: Config instance for the panoptic deeplab model.
      l2_regularizer: Optional `tf.keras.regularizers.Regularizer`; if given,
        every built component uses it for kernel regularization.

    Returns:
      A `tf.keras.Model` for panoptic segmentation.
    """
    norm_activation_config = model_config.norm_activation

    backbone = backbones.factory.build_backbone(
        input_specs=input_specs,
        backbone_config=model_config.backbone,
        norm_activation_config=norm_activation_config,
        l2_regularizer=l2_regularizer)

    def _make_decoder():
        # Semantic and instance branches share the same decoder type.
        return decoder_factory.build_decoder(
            input_specs=backbone.output_specs,
            model_config=model_config,
            l2_regularizer=l2_regularizer)

    semantic_decoder = _make_decoder()
    instance_decoder = (None if model_config.shared_decoder
                        else _make_decoder())

    # Normalization/activation keyword arguments shared by both heads.
    norm_kwargs = dict(
        activation=norm_activation_config.activation,
        use_sync_bn=norm_activation_config.use_sync_bn,
        norm_momentum=norm_activation_config.norm_momentum,
        norm_epsilon=norm_activation_config.norm_epsilon,
        kernel_regularizer=l2_regularizer)

    def _head_kwargs(head_config):
        # Per-head hyperparameters pulled from the head's own config.
        return dict(
            level=head_config.level,
            num_convs=head_config.num_convs,
            kernel_size=head_config.kernel_size,
            prediction_kernel_size=head_config.prediction_kernel_size,
            num_filters=head_config.num_filters,
            use_depthwise_convolution=head_config.use_depthwise_convolution,
            upsample_factor=head_config.upsample_factor,
            low_level=head_config.low_level,
            low_level_num_filters=head_config.low_level_num_filters,
            fusion_num_output_filters=head_config.fusion_num_output_filters)

    semantic_head = panoptic_deeplab_heads.SemanticHead(
        num_classes=model_config.num_classes,
        **_head_kwargs(model_config.semantic_head),
        **norm_kwargs)
    instance_head = panoptic_deeplab_heads.InstanceHead(
        **_head_kwargs(model_config.instance_head),
        **norm_kwargs)

    post_processor = None
    if model_config.generate_panoptic_masks:
        # Only instantiate the merge step when panoptic masks are requested.
        pp_config = model_config.post_processor
        post_processor = panoptic_deeplab_merge.PostProcessor(
            output_size=pp_config.output_size,
            center_score_threshold=pp_config.center_score_threshold,
            thing_class_ids=pp_config.thing_class_ids,
            label_divisor=pp_config.label_divisor,
            stuff_area_limit=pp_config.stuff_area_limit,
            ignore_label=pp_config.ignore_label,
            nms_kernel=pp_config.nms_kernel,
            keep_k_centers=pp_config.keep_k_centers,
            rescale_predictions=pp_config.rescale_predictions)

    return panoptic_deeplab_model.PanopticDeeplabModel(
        backbone=backbone,
        semantic_decoder=semantic_decoder,
        instance_decoder=instance_decoder,
        semantic_head=semantic_head,
        instance_head=instance_head,
        post_processor=post_processor)
Example #5
0
    def test_panoptic_deeplab_network_creation(self, input_size, level,
                                               low_level, shared_decoder,
                                               training):
        """Builds a panoptic deeplab network and checks its outputs."""
        batch_size = 2 if training else 1
        num_classes = 10
        inputs = np.random.rand(batch_size, input_size, input_size, 3)

        # Rows: original size, desired size, scale, offset (per image).
        image_info = tf.convert_to_tensor([[[input_size, input_size],
                                            [input_size, input_size], [1, 1],
                                            [0, 0]]])
        image_info = tf.tile(image_info, [batch_size, 1, 1])

        tf.keras.backend.set_image_data_format('channels_last')
        resnet_backbone = backbones.ResNet(model_id=50)

        sem_decoder = aspp.ASPP(level=level, dilation_rates=[6, 12, 18])
        # Either reuse one decoder for both branches or build a second one.
        inst_decoder = (
            sem_decoder if shared_decoder
            else aspp.ASPP(level=level, dilation_rates=[6, 12, 18]))

        sem_head = panoptic_deeplab_heads.SemanticHead(
            num_classes,
            level=level,
            low_level=low_level,
            low_level_num_filters=(64, 32))
        inst_head = panoptic_deeplab_heads.InstanceHead(
            level=level,
            low_level=low_level,
            low_level_num_filters=(64, 32))

        merger = panoptic_deeplab_merge.PostProcessor(
            output_size=[input_size, input_size],
            center_score_threshold=0.1,
            thing_class_ids=[1, 2, 3, 4],
            label_divisor=[256],
            stuff_area_limit=4096,
            ignore_label=0,
            nms_kernel=41,
            keep_k_centers=41,
            rescale_predictions=True)

        model = panoptic_deeplab_model.PanopticDeeplabModel(
            backbone=resnet_backbone,
            semantic_decoder=sem_decoder,
            instance_decoder=inst_decoder,
            semantic_head=sem_head,
            instance_head=inst_head,
            post_processor=merger)

        outputs = model(inputs=inputs,
                        image_info=image_info,
                        training=training)

        if training:
            # Raw predictions come out at the finest low-level stride.
            expected_hw = input_size // (2**low_level[-1])
            for key, channels in (('segmentation_outputs', num_classes),
                                  ('instance_centers_heatmap', 1),
                                  ('instance_centers_offset', 2)):
                self.assertIn(key, outputs)
                self.assertAllEqual([2, expected_hw, expected_hw, channels],
                                    outputs[key].numpy().shape)
        else:
            # Inference additionally runs the post processor.
            for key in ('panoptic_outputs', 'category_mask', 'instance_mask',
                        'instance_centers', 'instance_scores',
                        'segmentation_outputs'):
                self.assertIn(key, outputs)