# Example 1
def _add_roi_keypoint_head(
    model, add_roi_keypoint_head_func, blob_in, dim_in, spatial_scale_in
):
    """Attach a keypoint prediction branch to ``model``.

    A snapshot of the box-only graph is taken first so that, at
    inference time, the keypoint ops added below can be split off into
    ``model.keypoint_net`` while ``model.net`` is rolled back to the
    snapshot.  During training the keypoint losses are registered
    instead.

    Returns the loss-gradients dict in training mode, ``None`` otherwise.
    """
    # Snapshot of the graph before any keypoint ops are appended.
    bbox_only_proto = copy.deepcopy(model.net.Proto())

    # Build the keypoint head and its prediction outputs.
    head_blob, head_dim = add_roi_keypoint_head_func(
        model, blob_in, dim_in, spatial_scale_in
    )
    keypoint_blob = keypoint_rcnn_heads.add_keypoint_outputs(
        model, head_blob, head_dim
    )

    if model.train:
        return keypoint_rcnn_heads.add_keypoint_losses(model)

    # Inference cascades box predictions into keypoint predictions, which
    # requires separate nets for the two stages: extract the keypoint
    # suffix as its own net, then restore model.net to the bbox-only
    # snapshot.
    model.keypoint_net, _ = c2_utils.SuffixNet(
        'keypoint_net', model.net, len(bbox_only_proto.op), keypoint_blob
    )
    model.net._net = bbox_only_proto
    return None
# Example 2
def _add_roi_boundary_head(model, add_roi_boundary_head_func, blob_in, dim_in,
                           spatial_scale_in):
    """Attach a boundary prediction branch to ``model``.

    Returns ``(loss_gradients, blob_boundary_head)``; ``loss_gradients``
    is ``None`` at inference time.
    """
    # Snapshot of the graph before any boundary ops are appended.
    bbox_only_proto = copy.deepcopy(model.net.Proto())

    # Build the boundary head and its prediction outputs.
    head_blob, head_dim = add_roi_boundary_head_func(
        model, blob_in, dim_in, spatial_scale_in)
    boundary_blob = boundary_heads.add_boundary_rcnn_outputs(
        model, head_blob, head_dim)

    if model.train:
        loss_gradients = boundary_heads.add_boundary_rcnn_losses(
            model, boundary_blob)
    else:
        # Inference runs box predictions first, then boundary predictions,
        # which requires separate nets: extract the boundary suffix as its
        # own net, then restore model.net to the bbox-only snapshot.
        model.boundary_net, _ = c2_utils.SuffixNet(
            'boundary_net', model.net, len(bbox_only_proto.op), boundary_blob)
        model.net._net = bbox_only_proto
        loss_gradients = None
    return loss_gradients, head_blob
# Example 3
def _add_semantic_segms_head(model, blob_in, dim_in, spatial_scale_in):
    """Add the semantic segmentation head to the network.

    Captures the conv-body-only graph first so that, at inference time,
    the semantic segmentation ops can be split into their own net
    (``model.semantic_segms_net``) while ``model.net`` is restored to the
    conv-body snapshot.

    Returns the loss-gradients dict in training mode, ``None`` otherwise.
    """
    # Snapshot of the graph before the semantic segms ops are appended.
    conv_body_net = copy.deepcopy(model.net.Proto())

    # Add semantic segms net's input: rescale the backbone feature map.
    # Fix: pass the local `rescale_factor` — it was previously assigned
    # but unused while the call re-read the same cfg value.
    rescale_factor = cfg.SEMANTIC_NET.RESCALE_FACTOR
    blob_rescale_feat, dim_rescale_feat = model.RescaleFeatureMap(
        blob_in,
        'img_rescale_feature',
        dim_in,
        rescale_factor=rescale_factor,
        spatial_scale=spatial_scale_in,
        sampling_ratio=cfg.SEMANTIC_NET.ROI_XFORM_SAMPLING_RATIO)
    # Add semantic segms net head
    blob_semantic_segms_head, dim_semantic_segms_head = \
        semantic_segms_head.add_semantic_segms_head(
            model, blob_rescale_feat, dim_rescale_feat,
        )
    # Add semantic segms net output
    blob_semantic_segms_out = semantic_segms_head.add_semantic_segms_outputs(
        model, blob_semantic_segms_head, dim_semantic_segms_head)

    if not model.train:  # inference
        # Extract the semantic segms suffix as its own net, then restore
        # model.net to the conv-body-only snapshot.
        model.semantic_segms_net, semantic_segms_out = c2_utils.SuffixNet(
            'semantic_segms_net', model.net, len(conv_body_net.op),
            blob_semantic_segms_out)
        model.net._net = conv_body_net
        loss_gradients = None
    else:
        loss_gradients = semantic_segms_head.add_semantic_segms_losses(
            model, blob_semantic_segms_out)
    return loss_gradients
# Example 4
def _add_prn_head(model, blob_in, dim_in, spatial_scale_in):
    """Add a classification head predicting whether an RoI needs further
    refinement ("prn" = predict refinement-needed).

    Returns the loss-gradients dict in training mode, ``None`` otherwise.
    """
    # Snapshot of the graph before any prn ops are appended.
    bbox_only_proto = copy.deepcopy(model.net.Proto())

    # Training needs prn labels generated up front; this also updates the
    # blobs consumed by RefineNet.
    if model.train:
        prn_heads.add_prn_labels(model)

    # Build the prn head and its prediction outputs.
    prefix = 'mask'
    head_blob, head_dim = prn_heads.add_prn_head(
        model, blob_in, dim_in, spatial_scale_in, prefix)
    prn_out_blob = prn_heads.add_prn_outputs(model, head_blob, head_dim)

    if model.train:
        return prn_heads.add_prn_losses(model)

    # Inference cascades box -> roi mask -> refined mask predictions, each
    # requiring its own net: extract the refinement-needed suffix as its
    # own net, then restore model.net to the bbox-only snapshot.
    model.prn_net, _ = c2_utils.SuffixNet(
        'prn_net', model.net, len(bbox_only_proto.op), prn_out_blob)
    model.net._net = bbox_only_proto
    return None
# Example 5
def _add_generic_refine_keypoint_net_head(model, blob_in, dim_in,
                                          spatial_scale_in):
    """Add a generic RefineNet keypoint head to the model.

    The configured indicator type (``cfg.REFINENET.INDICATOR_TYPE``)
    determines how the RefineNet input blob is built; the head then
    emits refined keypoint outputs.

    Returns the loss-gradients dict in training mode, ``None`` otherwise.
    """
    # Snapshot of the graph before any RefineNet ops are appended.
    bbox_only_proto = copy.deepcopy(model.net.Proto())

    # RefineNet input — its form depends on the configured indicator type.
    indicator_type = cfg.REFINENET.INDICATOR_TYPE
    refine_in_blob, refine_in_dim = refine_net_heads.add_refine_net_inputs(
        model, blob_in, dim_in, spatial_scale_in, indicator_type)

    # RefineNet head, then its refined keypoint outputs.
    prefix = 'keypoint'
    refine_head_blob, refine_head_dim = refine_net_heads.add_refine_net_head(
        model, refine_in_blob, refine_in_dim, prefix)
    refined_keypoint_blob = refine_net_heads.add_refine_net_keypoint_outputs(
        model, refine_head_blob, refine_head_dim)

    if model.train:
        return refine_net_heads.add_refine_net_keypoint_losses(
            model, refined_keypoint_blob)

    # Inference cascades box -> mask/keypoint -> refined predictions, each
    # requiring its own net: extract the refine suffix as its own net,
    # then restore model.net to the bbox-only snapshot.
    model.refine_keypoint_net, _ = c2_utils.SuffixNet(
        'refine_keypoint_net', model.net, len(bbox_only_proto.op),
        refined_keypoint_blob)
    model.net._net = bbox_only_proto
    return None
# Example 6
def _add_roi_mask_head(model, add_roi_mask_head_func, blob_in, dim_in,
                       spatial_scale_in, blob_boundary_attention):
    """Attach a mask prediction branch to ``model``.

    When boundary supervision is on and mask concatenation is enabled,
    the boundary-attention features are concatenated onto the mask head
    features before the mask outputs are predicted.

    Returns the loss-gradients dict in training mode, ``None`` otherwise.
    """
    # Snapshot of the graph before any mask ops are appended.
    bbox_only_proto = copy.deepcopy(model.net.Proto())

    # Build the mask head.
    head_blob, head_dim = add_roi_mask_head_func(
        model, blob_in, dim_in, spatial_scale_in)

    # Optionally fuse boundary-attention features into the mask head
    # features (channel-wise concat doubles the feature dimension).
    if cfg.MODEL.BOUNDARY_ON and cfg.BOUNDARY.CONCAT_MASK:
        bo = 'm_b_concat'
        fused_blob, _ = model.net.Concat(
            [head_blob, blob_boundary_attention],
            [bo, "_" + bo + "_concat_dims"])
        fused_dim = head_dim * 2
    else:
        fused_blob, fused_dim = head_blob, head_dim

    # Add the mask prediction outputs.
    mask_blob = mask_rcnn_heads.add_mask_rcnn_outputs(model, fused_blob,
                                                      fused_dim)

    if model.train:
        return mask_rcnn_heads.add_mask_rcnn_losses(model, mask_blob)

    # Inference cascades box predictions into mask predictions, which
    # requires separate nets: extract the mask suffix as its own net,
    # then restore model.net to the bbox-only snapshot.
    model.mask_net, mask_blob = c2_utils.SuffixNet('mask_net', model.net,
                                                   len(bbox_only_proto.op),
                                                   mask_blob)
    model.net._net = bbox_only_proto
    return None