# Esempio n. 1
def add_fast_rcnn_losses(model):
    """Add losses for RoI classification and bounding box regression."""
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=model.GetLossScale())
    # add attribute loss
    if cfg.MODEL.ATTR:
        # NOTE(review): SoftmaxAttr appears to be a project-specific op;
        # ignore=-1 presumably skips RoIs labeled -1 (no attribute
        # annotation) -- confirm against the op's implementation.
        attr_prob, loss_attr = model.net.SoftmaxAttr(
            ['attr_score', 'attr_labels_int32'], ['attr_prob', 'loss_attr'],
            scale=model.GetLossScale() * cfg.MODEL.LOSS_ATTR,
            ignore=-1)
    loss_bbox = model.net.SmoothL1Loss([
        'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
        'bbox_outside_weights'
    ],
                                       'loss_bbox',
                                       scale=model.GetLossScale())
    # Register gradients and loss names; the attribute loss is only included
    # when attribute prediction is enabled.
    if cfg.MODEL.ATTR:
        loss_gradients = blob_utils.get_loss_gradients(
            model, [loss_cls, loss_attr, loss_bbox])
        model.AddLosses(['loss_cls', 'loss_attr', 'loss_bbox'])
    else:
        loss_gradients = blob_utils.get_loss_gradients(model,
                                                       [loss_cls, loss_bbox])
        model.AddLosses(['loss_cls', 'loss_bbox'])

    # Track classification accuracy as a training metric.
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddMetrics('accuracy_cls')
    return loss_gradients
def add_single_scale_rpn_losses(model):
    """Add losses for a single scale RPN model (i.e., no FPN)."""
    # Crop the full-sized RPN label/target blobs to the spatial extent of
    # the RPN feature maps before computing losses.
    model.net.SpatialNarrowAs(
        ['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32')
    for suffix in ('targets', 'inside_weights', 'outside_weights'):
        wide_blob = 'rpn_bbox_{}_wide'.format(suffix)
        narrow_blob = 'rpn_bbox_{}'.format(suffix)
        model.net.SpatialNarrowAs([wide_blob, 'rpn_bbox_pred'], narrow_blob)
    # Objectness classification loss.
    loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
        ['rpn_cls_logits', 'rpn_labels_int32'],
        'loss_rpn_cls',
        scale=model.GetLossScale())
    # Anchor box regression loss.
    bbox_inputs = [
        'rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
        'rpn_bbox_outside_weights'
    ]
    loss_rpn_bbox = model.net.SmoothL1Loss(
        bbox_inputs, 'loss_rpn_bbox', beta=1. / 9.,
        scale=model.GetLossScale())
    model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
    return blob_utils.get_loss_gradients(model, [loss_rpn_cls, loss_rpn_bbox])
# Esempio n. 3
def add_refine_net_keypoint_losses_gaussian(model, blob_refined_keypoint):
    """Add Mask R-CNN keypoint specific losses. Using MSE loss"""
    # Alias the refined keypoint blob under the canonical name used below.
    model.net.Alias(blob_refined_keypoint, 'refined_kps_prob')
    loss_refined_kps = model.net.MeanSquareLoss(
        [
            'refined_kps_prob', 'refined_keypoint_heatmaps',
            'refined_keypoint_weights'
        ],
        'loss_refined_kps',
        scale=cfg.REFINENET.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS)
    if not cfg.REFINENET.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
        # NOTE(review): the discussion below says "softmax loss above" but
        # the op here is MeanSquareLoss; the normalization rationale seems
        # inherited from the softmax keypoint-loss variant -- confirm.
        # Discussion: the softmax loss above will average the loss by the sum of
        # keypoint_weights, i.e. the total number of visible keypoints. Since
        # the number of visible keypoints can vary significantly between
        # minibatches, this has the effect of up-weighting the importance of
        # minibatches with few visible keypoints. (Imagine the extreme case of
        # only one visible keypoint versus N: in the case of N, each one
        # contributes 1/N to the gradient compared to the single keypoint
        # determining the gradient direction). Instead, we can normalize the
        # loss by the total number of keypoints, if it were the case that all
        # keypoints were visible in a full minibatch. (Returning to the example,
        # this means that the one visible keypoint contributes as much as each
        # of the N keypoints.)
        # StopGradient makes the normalizer act as a constant multiplier.
        model.StopGradient('refined_keypoint_loss_normalizer',
                           'refined_keypoint_loss_normalizer')
        loss_refined_kps = model.net.Mul(
            ['loss_refined_kps', 'refined_keypoint_loss_normalizer'],
            'loss_refined_kps_normalized')
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_refined_kps])
    model.AddLosses(loss_refined_kps)
    return loss_gradients
# Esempio n. 4
def add_fpn_rpn_vis_losses(model):
    """ Note that this is shared with FPN3D.py. So this same loss function
    is used with 3D RPN head. """
    loss_gradients = {}
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        slvl = str(lvl)
        # Some FPN levels may not define visibility logits; skip those.
        if (model.net.BlobIsDefined(scope.CurrentNameScope() +
                                    'rpn_vis_cls_logits_fpn' + slvl)):
            # Narrow the full-sized label blob to the feature map extent.
            model.net.SpatialNarrowAs([
                'rpn_vis_labels_int32_wide_fpn' + slvl,
                'rpn_vis_cls_logits_fpn' + slvl
            ], 'rpn_vis_labels_int32_fpn' + slvl)
            # Unnormalized sigmoid CE; the scale folds in GPU count, RPN
            # batch size per image, and images per batch.
            loss_rpn_vis_cls_fpn = model.net.SigmoidCrossEntropyLoss(
                [
                    'rpn_vis_cls_logits_fpn' + slvl,
                    'rpn_vis_labels_int32_fpn' + slvl
                ],
                'loss_rpn_vis_cls_fpn' + slvl,
                normalize=0,
                scale=(1. / cfg.NUM_GPUS / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                       cfg.TRAIN.IMS_PER_BATCH))
            loss_gradients.update(
                blob_utils.get_loss_gradients(model, [loss_rpn_vis_cls_fpn]))
            # De-duplicated registration of the per-level loss name.
            model.losses = list(
                set(model.losses + ['loss_rpn_vis_cls_fpn' + slvl]))
    return loss_gradients
# Esempio n. 5
def add_single_scale_rpn_losses(model):
    """Add classification and box regression losses for a non-FPN RPN."""
    loss_scale = model.GetLossScale()
    # The *_wide blobs are full-image-sized; crop each to the spatial shape
    # of the corresponding prediction blob.
    narrow_pairs = [
        (['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32'),
        (['rpn_bbox_targets_wide', 'rpn_bbox_pred'], 'rpn_bbox_targets'),
        (['rpn_bbox_inside_weights_wide', 'rpn_bbox_pred'],
         'rpn_bbox_inside_weights'),
        (['rpn_bbox_outside_weights_wide', 'rpn_bbox_pred'],
         'rpn_bbox_outside_weights'),
    ]
    for inputs, output in narrow_pairs:
        model.net.SpatialNarrowAs(inputs, output)
    # Binary objectness loss on the cropped labels.
    loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
        ['rpn_cls_logits', 'rpn_labels_int32'], 'loss_rpn_cls',
        scale=loss_scale)
    # Smooth L1 regression loss on the anchor deltas.
    loss_rpn_bbox = model.net.SmoothL1Loss(
        ['rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
         'rpn_bbox_outside_weights'],
        'loss_rpn_bbox',
        beta=1. / 9.,
        scale=loss_scale)
    loss_gradients = blob_utils.get_loss_gradients(
        model, [loss_rpn_cls, loss_rpn_bbox])
    model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
    return loss_gradients
# Esempio n. 6
def add_fast_rcnn_losses(model):
    """Add losses for RoI classification and bounding box regression."""
    if cfg.TRAIN.OHEM:
        # Online Hard Example Mining: compute unreduced per-RoI cls and bbox
        # losses, then let BoxAnnotatorOHEM select hard RoIs by emitting
        # re-weighted label weights and bbox outside weights.
        cls_prob_per_roi, loss_cls_per_roi = model.net.SoftmaxWithLossOHEM(
            ['cls_score', 'labels_int32'],
            ['cls_prob_per_roi', 'loss_cls_per_roi'])
        loss_bbox_per_roi = model.net.SmoothL1LossOHEM([
            'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
            'bbox_outside_weights'
        ], 'loss_bbox_per_roi')
        per_roi_loss = model.net.Add(['loss_cls_per_roi', 'loss_bbox_per_roi'],
                                     'per_roi_loss')
        label_weights, bbox_outside_weights = model.net.BoxAnnotatorOHEM(
            ['rois', 'per_roi_loss', 'bbox_outside_weights'],
            ['label_weights', 'bbox_outside_weights_ohem'],
            roi_per_img=cfg.TRAIN.OHEM_ROI_PER_IMG)
    # The actual training losses; with OHEM enabled the inputs are swapped
    # for the re-weighted blobs produced above.
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32', 'label_weights'] if cfg.TRAIN.OHEM else
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=model.GetLossScale())
    loss_bbox = model.net.SmoothL1Loss([
        'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
        'bbox_outside_weights_ohem'
    ] if cfg.TRAIN.OHEM else [
        'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
        'bbox_outside_weights'
    ],
                                       'loss_bbox',
                                       scale=model.GetBBoxLossScale())
    loss_gradients = blob_utils.get_loss_gradients(model,
                                                   [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients
# Esempio n. 7
def add_fpn_retinanet_losses(model):
    """Add RetinaNet losses: per-FPN-level box regression (SelectSmoothL1)
    and classification (sigmoid or softmax focal loss)."""
    loss_gradients = {}
    gradients, losses = [], []

    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid

    model.AddMetrics(['retnet_fg_num', 'retnet_bg_num'])
    # ==========================================================================
    # bbox regression loss - SelectSmoothL1Loss for multiple anchors at a location
    # ==========================================================================
    for lvl in range(k_min, k_max + 1):
        suffix = 'fpn{}'.format(lvl)
        bbox_loss = model.net.SelectSmoothL1Loss(
            [
                'retnet_bbox_pred_' + suffix, 'retnet_roi_bbox_targets_' +
                suffix, 'retnet_roi_fg_bbox_locs_' + suffix, 'retnet_fg_num'
            ],
            'retnet_loss_bbox_' + suffix,
            beta=cfg.RETINANET.BBOX_REG_BETA,
            scale=model.GetLossScale() * cfg.RETINANET.BBOX_REG_WEIGHT)
        gradients.append(bbox_loss)
        losses.append('retnet_loss_bbox_' + suffix)

    # ==========================================================================
    # cls loss - depends on softmax/sigmoid outputs
    # ==========================================================================
    for lvl in range(k_min, k_max + 1):
        suffix = 'fpn{}'.format(lvl)
        cls_lvl_logits = 'retnet_cls_pred_' + suffix
        if not cfg.RETINANET.SOFTMAX:
            # NOTE(review): sigmoid variant uses num_classes - 1, presumably
            # excluding the background class -- confirm against the op docs.
            cls_focal_loss = model.net.SigmoidFocalLoss(
                [
                    cls_lvl_logits, 'retnet_cls_labels_' + suffix,
                    'retnet_fg_num'
                ], ['fl_{}'.format(suffix)],
                gamma=cfg.RETINANET.LOSS_GAMMA,
                alpha=cfg.RETINANET.LOSS_ALPHA,
                scale=model.GetLossScale(),
                num_classes=model.num_classes - 1)
            gradients.append(cls_focal_loss)
            losses.append('fl_{}'.format(suffix))
        else:
            # Softmax variant also emits the gated class probabilities.
            cls_focal_loss, gated_prob = model.net.SoftmaxFocalLoss(
                [
                    cls_lvl_logits, 'retnet_cls_labels_' + suffix,
                    'retnet_fg_num'
                ], ['fl_{}'.format(suffix), 'retnet_prob_{}'.format(suffix)],
                gamma=cfg.RETINANET.LOSS_GAMMA,
                alpha=cfg.RETINANET.LOSS_ALPHA,
                scale=model.GetLossScale(),
                num_classes=model.num_classes)
            gradients.append(cls_focal_loss)
            losses.append('fl_{}'.format(suffix))

    loss_gradients.update(blob_utils.get_loss_gradients(model, gradients))
    model.AddLosses(losses)
    return loss_gradients
# Esempio n. 8
def add_mask_rcnn_losses(model, blob_mask):
    """Add the Mask R-CNN sigmoid cross entropy mask loss."""
    mask_loss_scale = model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
    loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_int32'], 'loss_mask', scale=mask_loss_scale)
    model.AddLosses('loss_mask')
    return blob_utils.get_loss_gradients(model, [loss_mask])
# Esempio n. 9
def add_clsn_losses(model):
    """Add a sigmoid cross entropy loss over the classification logits."""
    loss_cls = model.net.SigmoidCrossEntropyLoss(
        ['cls_logits', 'labels_int32'],
        'loss_cls',
        scale=model.GetLossScale())
    model.AddLosses('loss_cls')
    return blob_utils.get_loss_gradients(model, [loss_cls])
# Esempio n. 10
def add_boundary_rcnn_losses(model, blob_boundary):
    """Add the boundary prediction loss (sigmoid cross entropy)."""
    boundary_scale = model.GetLossScale() * cfg.BOUNDARY.WEIGHT_LOSS_MASK
    loss_boundary = model.net.SigmoidCrossEntropyLoss(
        [blob_boundary, 'boundary_int32'], 'loss_boundary',
        scale=boundary_scale)
    model.AddLosses('loss_boundary')
    return blob_utils.get_loss_gradients(model, [loss_boundary])
# Esempio n. 11
def add_semantic_segms_losses(model, blob_semantic_segms):
    """Add the semantic segmentation loss (sigmoid cross entropy)."""
    # NB: the blob name 'loss_segmantic_segms' (sic) is kept as-is since
    # other code may reference it.
    segm_scale = 1. / cfg.NUM_GPUS * cfg.MRCNN.WEIGHT_LOSS_MASK
    loss_segmantic_segms = model.net.SigmoidCrossEntropyLoss(
        [blob_semantic_segms, 'semantic_segms_int32'],
        'loss_segmantic_segms', scale=segm_scale)
    model.AddLosses('loss_segmantic_segms')
    return blob_utils.get_loss_gradients(model, [loss_segmantic_segms])
# Esempio n. 12
def add_fast_rcnn_losses_class_only(model):
    """Add the RoI classification loss (no bounding box regression)."""
    softmax_inputs = ['cls_score', 'labels_int32']
    softmax_outputs = ['cls_prob', 'loss_cls']
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        softmax_inputs, softmax_outputs, scale=model.GetLossScale())
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls])
    model.AddLosses(['loss_cls'])
    # Track classification accuracy as a training metric.
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddMetrics('accuracy_cls')
    return loss_gradients
# Esempio n. 13
def add_mask_rcnn_losses(model, blob_mask):
    """Add the Mask R-CNN mask loss (per-pixel sigmoid cross entropy)."""
    # Scale combines the 1 / NUM_GPUS factor with the mask loss weight.
    mask_scale = 1. / cfg.NUM_GPUS * cfg.MRCNN.WEIGHT_LOSS_MASK
    loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_int32'], 'loss_mask', scale=mask_scale)
    model.AddLosses('loss_mask')
    return blob_utils.get_loss_gradients(model, [loss_mask])
# Esempio n. 14
def add_rpn_vis_losses(model, time_dim=1):
    """Add the RPN visibility classification loss."""
    # Crop the full-sized label blob to the feature map's spatial extent.
    model.net.SpatialNarrowAs(
        ['rpn_vis_labels_int32_wide', 'rpn_vis_cls_logits'],
        'rpn_vis_labels_int32')
    loss_rpn_vis_cls = model.net.SigmoidCrossEntropyLoss(
        ['rpn_vis_cls_logits', 'rpn_vis_labels_int32'],
        'loss_rpn_vis_cls',
        scale=1. / cfg.NUM_GPUS / time_dim)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_rpn_vis_cls])
    # De-duplicated registration of the loss name.
    model.losses = list(set(model.losses + ['loss_rpn_vis_cls']))
    return loss_gradients
# Esempio n. 15
def add_fcn_losses(model, blob_mask):
    """Add the FCN softmax mask loss and a mask accuracy metric."""
    mask_prob, loss_mask = model.net.SoftmaxWithLoss(
        [blob_mask, 'masks_int32'], ['mask_prob', 'loss_mask'],
        scale=model.GetLossScale())
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
    model.AddLosses('loss_mask')
    model.Accuracy(['mask_prob', 'masks_int32'], 'accuracy_mask')
    model.AddMetrics('accuracy_mask')
    return loss_gradients
# Esempio n. 16
def add_mask_rcnn_losses(model, blob_mask):
    """Add the Mask R-CNN mask loss (per-pixel sigmoid cross entropy).

    Note: the original code branched on cfg.MRCNN.CLS_SPECIFIC_MASK, but
    both branches were byte-identical (class-specific and class-agnostic
    masks both use sigmoid cross entropy here), so the redundant
    conditional is collapsed. Behavior is unchanged for any config value.
    """
    loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_int32'], 'loss_mask',
        scale=1. / cfg.NUM_GPUS * cfg.MRCNN.WEIGHT_LOSS_MASK)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
    # De-duplicated registration of the loss name.
    model.losses = list(set(model.losses + ['loss_mask']))
    return loss_gradients
# Esempio n. 17
def add_fast_rcnn_losses(model, time_dim=1):
    """Add RoI classification and bbox regression losses (video variant).

    Only the bbox loss is additionally divided by ``time_dim``; the cls
    loss keeps the plain 1 / NUM_GPUS scale, as in the original.
    """
    gpu_scale = 1. / cfg.NUM_GPUS
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=gpu_scale)
    bbox_blobs = ['bbox_pred', 'bbox_targets', 'bbox_inside_weights',
                  'bbox_outside_weights']
    loss_bbox = model.net.SmoothL1Loss(
        bbox_blobs, 'loss_bbox', scale=gpu_scale / time_dim)
    loss_gradients = blob_utils.get_loss_gradients(
        model, [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.losses = list(set(model.losses + ['loss_cls', 'loss_bbox']))
    model.metrics = list(set(model.metrics + ['accuracy_cls']))
    return loss_gradients
# Esempio n. 18
def add_mlp_losses(model):
    """Add the MLP softmax classification loss and an accuracy metric."""
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['logits', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=model.GetLossScale())
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls])
    model.AddLosses(['loss_cls'])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddMetrics('accuracy_cls')
    return loss_gradients
# Esempio n. 19
def add_loss(model, cls_score, loss_scale=1.0):
    """Add a softmax classification loss (and accuracy metric) for cls_score.

    Blob names for the probability, loss and accuracy blobs are derived
    from the (unscoped) score blob name by substring replacement.
    """
    base_name = c2_utils.UnscopeGPUName(cls_score._name)
    prob_blob = base_name.replace('cls_score', 'cls_prob')
    loss_blob = base_name.replace('cls_score', 'loss_cls')
    acc_blob = base_name.replace('cls_score', 'accuracy_cls')
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        [cls_score, 'labels_int32'], [prob_blob, loss_blob],
        scale=model.GetLossScale() * loss_scale)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls])
    model.AddLosses([loss_cls])
    model.Accuracy([prob_blob, 'labels_int32'], acc_blob)
    model.AddMetrics(acc_blob)
    return loss_gradients, cls_prob
# Esempio n. 20
def add_prn_losses(model):
    """Add the PRN classification loss plus accuracy/refine-ratio metrics."""
    loss_prn = model.net.SigmoidCrossEntropyLoss(
        ['prn_logits', 'prn_labels_int32'], 'loss_prn',
        scale=1. / cfg.NUM_GPUS)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_prn])
    model.AddLosses(['loss_prn'])
    # Metrics: sigmoid accuracy on the PRN probabilities, plus the
    # 'refine_ratio' blob (computed elsewhere).
    model.net.Sigmoid('prn_logits', 'prn_probs')
    model.SigmoidAccuracy(['prn_probs', 'prn_labels_int32'], 'accuracy_prn')
    model.AddMetrics('accuracy_prn')
    model.AddMetrics('refine_ratio')
    return loss_gradients
# Esempio n. 21
def add_heatmap_losses(model, time_dim=1):
    """Add the keypoint heatmap loss (softmax over spatial locations)."""
    heatmap_area = cfg.KRCNN.HEATMAP_SIZE * cfg.KRCNN.HEATMAP_SIZE
    # Flatten (N, K, H, W) into (N*K, H*W) so each keypoint heatmap becomes
    # a single softmax distribution over spatial positions.
    model.net.Reshape(
        ['kps_score'], ['kps_score_reshaped', '_kps_score_old_shape'],
        shape=(-1, heatmap_area))
    # Per the original author: deliberately do NOT divide the scale by
    # time_dim (values were the same for 17 vs 51 keypoints).
    kps_prob, loss_kps = model.net.SoftmaxWithLoss(
        ['kps_score_reshaped', 'keypoint_locations_int32', 'keypoint_weights'],
        ['kps_prob', 'loss_kps'],
        scale=cfg.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS,
        spatial=0)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_kps])
    model.losses = list(set(model.losses + ['loss_kps']))
    return loss_gradients
# Esempio n. 22
def add_fast_rcnn_losses(model):
    """Add RoI classification and bounding box regression losses."""
    loss_scale = model.GetLossScale()
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=loss_scale)
    bbox_inputs = [
        'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
        'bbox_outside_weights'
    ]
    loss_bbox = model.net.SmoothL1Loss(bbox_inputs, 'loss_bbox',
                                       scale=loss_scale)
    loss_gradients = blob_utils.get_loss_gradients(model,
                                                   [loss_cls, loss_bbox])
    model.AddLosses(['loss_cls', 'loss_bbox'])
    # Monitor classification accuracy during training.
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddMetrics('accuracy_cls')
    return loss_gradients
# Esempio n. 23
def add_distill_loss(model, student_prefix, teacher_prefix):
    """Add a per-FPN-level distillation loss between student logits and
    teacher class probabilities (SigmoidAdaptiveDistillLoss).

    NOTE(review): the scale multiplies TEMPERATURE twice (i.e. squared);
    presumably the standard temperature-squared gradient correction from
    distillation -- confirm intended.
    """
    loss_gradients = {}
    gradients, losses = [], []

    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid

    if cfg.DISTILLATION.ADAPTIVE_NORMALIZER:
        # Build a normalizer from the power-sum of the teacher's class
        # probabilities across all FPN levels.
        cls_lvl_logits_power_sum_list = []
        for lvl in range(k_min, k_max + 1):
            suffix = 'fpn{}'.format(lvl)
            cls_lvl_logits = teacher_prefix + 'retnet_cls_prob_' + suffix
            cls_lvl_logits_power_sum_list.append(cls_lvl_logits)

        model.net.PowSum(cls_lvl_logits_power_sum_list,
                         student_prefix + 'distill_normalizer',
                         power=cfg.DISTILLATION.LOGITS_POWER)
        model.AddMetrics(student_prefix + 'distill_normalizer')

    for lvl in range(k_min, k_max + 1):
        suffix = 'fpn{}'.format(lvl)
        cls_lvl_logits = 'retnet_cls_pred_' + suffix
        # Normalizer input is either the foreground count or the adaptive
        # normalizer computed above, depending on config.
        cls_adaptive_distill_loss = model.net.SigmoidAdaptiveDistillLoss(
            [
                student_prefix + cls_lvl_logits,
                teacher_prefix + 'retnet_cls_prob_' + suffix,
                'retnet_cls_labels_' + suffix, student_prefix +
                ('retnet_fg_num' if not cfg.DISTILLATION.ADAPTIVE_NORMALIZER
                 else 'distill_normalizer')
            ], [student_prefix + 'fl_distill_{}'.format(suffix)],
            gamma=cfg.DISTILLATION.LOSS_GAMMA,
            alpha=cfg.DISTILLATION.LOSS_ALPHA,
            scale=model.GetLossScale() * cfg.DISTILLATION.TEMPERATURE *
            cfg.DISTILLATION.TEMPERATURE,
            beta=cfg.DISTILLATION.LOSS_BETA,
            num_classes=model.num_classes - 1,
            ignored_label=cfg.DISTILLATION.IGNORED_LABEL)

        gradients.append(cls_adaptive_distill_loss)
        losses.append(student_prefix + 'fl_distill_{}'.format(suffix))

    loss_gradients.update(blob_utils.get_loss_gradients(model, gradients))
    model.AddLosses(losses)
    return loss_gradients
# Esempio n. 24
def add_refine_net_mask_losses(model, blob_refined_mask):
    """ Add RefineNet mask specific losses. """
    # Three mutually exclusive loss variants, selected by config:
    if cfg.MODEL.PIXEL_FOCAL_LOSS_ON:
        # using pixel level focal sigmoid cross entropy loss
        loss_refined_mask = model.net.MaskSigmoidFocalLoss(
            [blob_refined_mask, 'refined_masks_int32'],
            'loss_refined_mask',
            scale=1. / cfg.NUM_GPUS * cfg.REFINENET.WEIGHT_LOSS_MASK,
            gamma=cfg.PIXEL_FOCAL_LOSS.LOSS_GAMMA)
    elif cfg.REFINENET.ASSIGN_LARGER_WEIGHT_FOR_CROWDED_SAMPLES:
        # Per-instance weighting via the 'loss_weights' blob.
        loss_refined_mask = model.net.InstanceWeightedSigmoidCrossEntropyLoss(
            [blob_refined_mask, 'refined_masks_int32', 'loss_weights'],
            'loss_refined_mask',
            scale=1. / cfg.NUM_GPUS * cfg.REFINENET.WEIGHT_LOSS_MASK)
    else:
        # using normal sigmoid cross entropy loss
        loss_refined_mask = model.net.SigmoidCrossEntropyLoss(
            [blob_refined_mask, 'refined_masks_int32'],
            'loss_refined_mask',
            scale=1. / cfg.NUM_GPUS * cfg.REFINENET.WEIGHT_LOSS_MASK)

    loss_gradients = blob_utils.get_loss_gradients(model, [loss_refined_mask])
    model.AddLosses('loss_refined_mask')
    # # And adds MaskIoU ops
    # model.net.Sigmoid(blob_refined_mask, 'refined_mask_probs')
    # model.net.MaskIoU(
    #     ['refined_mask_probs', 'refined_masks_int32'],
    #     ['refined_mask_ious', 'mean_refined_mask_ious']
    # )
    # model.AddMetrics('mean_refined_mask_ious')
    # # And we also want to monitor the mask_ious before refined
    # if cfg.MODEL.PRN_ON:
    #     model.net.SampleAs(
    #         ['mask_ious', 'roi_needs_refine_int32'],
    #         ['prior_mask_ious']
    #     )
    #     model.net.ReduceFrontMean(
    #         'prior_mask_ious',
    #         'mean_prior_mask_ious',
    #         num_reduce_dim=1
    #     )
    #     model.AddMetrics('mean_prior_mask_ious')
    return loss_gradients
# Esempio n. 25
def add_fast_rcnn_losses(model):
    """Add RoI classification and bounding box regression losses."""
    # Both losses use a 1 / NUM_GPUS scale factor.
    per_gpu_scale = 1. / cfg.NUM_GPUS
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=per_gpu_scale)
    loss_bbox = model.net.SmoothL1Loss(
        ['bbox_pred', 'bbox_targets', 'bbox_inside_weights',
         'bbox_outside_weights'],
        'loss_bbox',
        scale=per_gpu_scale)
    loss_gradients = blob_utils.get_loss_gradients(
        model, [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients
# Esempio n. 26
def add_refine_net_keypoint_losses_softmax(model, blob_refined_keypoint):
    """Add Mask R-CNN keypoint specific losses."""
    # Reshape input from (N, K, H, W) to (NK, HW)
    model.net.Reshape(
        blob_refined_keypoint,
        ['refined_kps_score_reshaped', 'refined_kps_score_old_shape'],
        shape=(-1, cfg.REFINENET.KRCNN.HEATMAP_SIZE *
               cfg.REFINENET.KRCNN.HEATMAP_SIZE))
    # Softmax across **space** (woahh....space!)
    # Note: this is not what is commonly called "spatial softmax"
    # (i.e., softmax applied along the channel dimension at each spatial
    # location); This is softmax applied over a set of spatial locations (i.e.,
    # each spatial location is a "class").
    refined_kps_prob, loss_refined_kps = model.net.SoftmaxWithLoss(
        [
            'refined_kps_score_reshaped', 'refined_keypoint_locations_int32',
            'refined_keypoint_weights'
        ], ['refined_kps_prob', 'loss_refined_kps'],
        scale=cfg.REFINENET.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS,
        spatial=0)
    if not cfg.REFINENET.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
        # Discussion: the softmax loss above will average the loss by the sum of
        # keypoint_weights, i.e. the total number of visible keypoints. Since
        # the number of visible keypoints can vary significantly between
        # minibatches, this has the effect of up-weighting the importance of
        # minibatches with few visible keypoints. (Imagine the extreme case of
        # only one visible keypoint versus N: in the case of N, each one
        # contributes 1/N to the gradient compared to the single keypoint
        # determining the gradient direction). Instead, we can normalize the
        # loss by the total number of keypoints, if it were the case that all
        # keypoints were visible in a full minibatch. (Returning to the example,
        # this means that the one visible keypoint contributes as much as each
        # of the N keypoints.)
        # StopGradient makes the normalizer act as a constant multiplier.
        model.StopGradient('refined_keypoint_loss_normalizer',
                           'refined_keypoint_loss_normalizer')
        loss_refined_kps = model.net.Mul(
            ['loss_refined_kps', 'refined_keypoint_loss_normalizer'],
            'loss_refined_kps_normalized')
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_refined_kps])
    model.AddLosses(loss_refined_kps)
    return loss_gradients
# Esempio n. 27
def add_fpn_rpn_losses(model, time_dim=1):
    """ Note that this is shared with FPN3D.py. So this same loss function
    is used with 3D RPN head. """
    loss_gradients = {}
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        slvl = str(lvl)
        # Spatially narrow the full-sized RPN label arrays to match the feature map
        # shape
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide_fpn' + slvl, 'rpn_cls_logits_fpn' + slvl],
            'rpn_labels_int32_fpn' + slvl)
        for key in ('targets', 'inside_weights', 'outside_weights'):
            model.net.SpatialNarrowAs([
                'rpn_bbox_' + key + '_wide_fpn' + slvl,
                'rpn_bbox_pred_fpn' + slvl
            ], 'rpn_bbox_' + key + '_fpn' + slvl)
        # Unnormalized sigmoid CE; the explicit scale folds in the GPU
        # count, RPN batch size per image, and images per batch.
        loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
            ['rpn_cls_logits_fpn' + slvl, 'rpn_labels_int32_fpn' + slvl],
            'loss_rpn_cls_fpn' + slvl,
            normalize=0,
            scale=(1. / cfg.NUM_GPUS / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                   cfg.TRAIN.IMS_PER_BATCH))
        # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
        # handled by (1) setting bbox outside weights and (2) SmoothL1Loss
        # normalizes by IMS_PER_BATCH
        loss_rpn_bbox_fpn = model.net.SmoothL1Loss([
            'rpn_bbox_pred_fpn' + slvl, 'rpn_bbox_targets_fpn' + slvl,
            'rpn_bbox_inside_weights_fpn' + slvl,
            'rpn_bbox_outside_weights_fpn' + slvl
        ],
                                                   'loss_rpn_bbox_fpn' + slvl,
                                                   beta=1. / 9.,
                                                   scale=1. / cfg.NUM_GPUS /
                                                   time_dim)
        loss_gradients.update(
            blob_utils.get_loss_gradients(
                model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn]))
        # De-duplicated registration of the per-level loss names.
        model.losses = list(
            set(model.losses +
                ['loss_rpn_cls_fpn' + slvl, 'loss_rpn_bbox_fpn' + slvl]))
    return loss_gradients
# Esempio n. 28
def add_keypoint_losses(model):
    """Add Mask R-CNN keypoint specific losses."""
    # Reshape input from (N, K, H, W) to (NK, HW)
    model.net.Reshape(
        ['kps_score'], ['kps_score_reshaped', '_kps_score_old_shape'],
        shape=(-1, cfg.KRCNN.HEATMAP_SIZE * cfg.KRCNN.HEATMAP_SIZE)
    )
    # Softmax across **space** (woahh....space!)
    # Note: this is not what is commonly called "spatial softmax"
    # (i.e., softmax applied along the channel dimension at each spatial
    # location); This is softmax applied over a set of spatial locations (i.e.,
    # each spatial location is a "class").
    kps_prob, loss_kps = model.net.SoftmaxWithLoss(
        ['kps_score_reshaped', 'keypoint_locations_int32', 'keypoint_weights'],
        ['kps_prob', 'loss_kps'],
        scale=cfg.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS,
        spatial=0
    )
    if not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
        # Discussion: the softmax loss above will average the loss by the sum of
        # keypoint_weights, i.e. the total number of visible keypoints. Since
        # the number of visible keypoints can vary significantly between
        # minibatches, this has the effect of up-weighting the importance of
        # minibatches with few visible keypoints. (Imagine the extreme case of
        # only one visible keypoint versus N: in the case of N, each one
        # contributes 1/N to the gradient compared to the single keypoint
        # determining the gradient direction). Instead, we can normalize the
        # loss by the total number of keypoints, if it were the case that all
        # keypoints were visible in a full minibatch. (Returning to the example,
        # this means that the one visible keypoint contributes as much as each
        # of the N keypoints.)
        # StopGradient makes the normalizer act as a constant multiplier.
        model.StopGradient(
            'keypoint_loss_normalizer', 'keypoint_loss_normalizer'
        )
        loss_kps = model.net.Mul(
            ['loss_kps', 'keypoint_loss_normalizer'], 'loss_kps_normalized'
        )
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_kps])
    model.AddLosses(loss_kps)
    return loss_gradients
# Esempio n. 29
def add_fpn_rpn_losses(model, time_dim=1):
    """Attach classification and box-regression losses for the FPN RPN head.

    Shared with FPN3D.py, so the same function also serves the 3D RPN head;
    ``time_dim`` divides the bbox-loss scale for multi-frame clips. Returns
    a dict mapping loss blobs to their gradient blobs.
    """
    # Classification normalizer is constant across levels; compute it once.
    cls_scale = (1. / cfg.NUM_GPUS / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                 cfg.TRAIN.IMS_PER_BATCH)
    loss_gradients = {}
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        suffix = '_fpn' + str(lvl)
        # The full-sized RPN label arrays are wider than the feature map;
        # crop them spatially so their shapes match the predictions.
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide' + suffix, 'rpn_cls_logits' + suffix],
            'rpn_labels_int32' + suffix)
        for key in ('targets', 'inside_weights', 'outside_weights'):
            model.net.SpatialNarrowAs(
                ['rpn_bbox_%s_wide%s' % (key, suffix),
                 'rpn_bbox_pred' + suffix],
                'rpn_bbox_%s%s' % (key, suffix))
        loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
            ['rpn_cls_logits' + suffix, 'rpn_labels_int32' + suffix],
            'loss_rpn_cls' + suffix,
            normalize=0,
            scale=cls_scale)
        # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
        # handled by (1) setting bbox outside weights and (2) SmoothL1Loss
        # normalizing by IMS_PER_BATCH.
        loss_rpn_bbox_fpn = model.net.SmoothL1Loss(
            ['rpn_bbox_pred' + suffix,
             'rpn_bbox_targets' + suffix,
             'rpn_bbox_inside_weights' + suffix,
             'rpn_bbox_outside_weights' + suffix],
            'loss_rpn_bbox' + suffix,
            beta=1. / 9.,
            scale=1. / cfg.NUM_GPUS / time_dim)
        loss_gradients.update(
            blob_utils.get_loss_gradients(
                model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn]))
        model.losses = list(
            set(model.losses) |
            {'loss_rpn_cls' + suffix, 'loss_rpn_bbox' + suffix})
    return loss_gradients
Esempio n. 30
0
def add_fpn_rpn_losses(model):
    """Add per-level RPN losses for an FPN backbone.

    For every pyramid level the wide label/target blobs are cropped to the
    feature-map extent, then a sigmoid cross-entropy objectness loss and a
    SmoothL1 box-regression loss are added. Returns the loss-gradient map.
    """
    loss_gradients = {}
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        sfx = 'fpn{}'.format(lvl)
        # Spatially narrow the full-sized RPN label arrays to match the
        # feature-map shape.
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide_' + sfx, 'rpn_cls_logits_' + sfx],
            'rpn_labels_int32_' + sfx)
        for key in ('targets', 'inside_weights', 'outside_weights'):
            model.net.SpatialNarrowAs(
                ['rpn_bbox_{}_wide_{}'.format(key, sfx),
                 'rpn_bbox_pred_' + sfx],
                'rpn_bbox_{}_{}'.format(key, sfx))
        loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
            ['rpn_cls_logits_' + sfx, 'rpn_labels_int32_' + sfx],
            'loss_rpn_cls_' + sfx,
            normalize=0,
            scale=(model.GetLossScale() / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                   cfg.TRAIN.IMS_PER_BATCH))
        # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
        # handled by (1) setting bbox outside weights and (2) SmoothL1Loss
        # normalizing by IMS_PER_BATCH.
        loss_rpn_bbox_fpn = model.net.SmoothL1Loss(
            ['rpn_bbox_pred_' + sfx,
             'rpn_bbox_targets_' + sfx,
             'rpn_bbox_inside_weights_' + sfx,
             'rpn_bbox_outside_weights_' + sfx],
            'loss_rpn_bbox_' + sfx,
            beta=1. / 9.,
            scale=model.GetLossScale())
        loss_gradients.update(
            blob_utils.get_loss_gradients(
                model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn]))
        model.AddLosses(['loss_rpn_cls_' + sfx, 'loss_rpn_bbox_' + sfx])
    return loss_gradients
def add_fast_rcnn_losses(model):
    """Add losses for RoI classification and bounding box regression."""
    loss_scale = model.GetLossScale()
    # Classification loss over the VOC class scores; also emits the class
    # probabilities used below for the accuracy metric.
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score_voc', 'labels_int32', 'fpn_fg_num'],
        ['cls_prob', 'loss_cls'],
        gamma=cfg.RETINANET.LOSS_GAMMA,
        alpha=cfg.RETINANET.LOSS_ALPHA,
        scale=loss_scale,
        num_classes=cfg.MODEL.NUM_CLASSES - 1)
    # Box-regression loss, masked by the inside/outside weight blobs.
    bbox_inputs = ['bbox_pred_voc', 'bbox_targets', 'bbox_inside_weights',
                   'bbox_outside_weights']
    loss_bbox = model.net.SmoothL1Loss(bbox_inputs, 'loss_bbox',
                                       scale=loss_scale)
    loss_gradients = blob_utils.get_loss_gradients(
        model, [loss_cls, loss_bbox])
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddMetrics('accuracy_cls')
    return loss_gradients
Esempio n. 32
0
def add_fpn_rpn_vis_losses(model):
    """Add the per-level RPN visibility classification losses.

    Shared with FPN3D.py, so this same loss function is used with the 3D
    RPN head. Pyramid levels whose visibility logits blob was never
    created are skipped.
    """
    loss_gradients = {}
    # The normalizer is identical for every level; compute it once.
    cls_scale = (1. / cfg.NUM_GPUS / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                 cfg.TRAIN.IMS_PER_BATCH)
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        sfx = 'fpn' + str(lvl)
        logits = 'rpn_vis_cls_logits_' + sfx
        # Guard: skip levels where the visibility head was not built.
        if not model.net.BlobIsDefined(scope.CurrentNameScope() + logits):
            continue
        model.net.SpatialNarrowAs(
            ['rpn_vis_labels_int32_wide_' + sfx, logits],
            'rpn_vis_labels_int32_' + sfx)
        loss_name = 'loss_rpn_vis_cls_' + sfx
        loss_rpn_vis_cls_fpn = model.net.SigmoidCrossEntropyLoss(
            [logits, 'rpn_vis_labels_int32_' + sfx],
            loss_name,
            normalize=0,
            scale=cls_scale)
        loss_gradients.update(
            blob_utils.get_loss_gradients(model, [loss_rpn_vis_cls_fpn]))
        model.losses = list(set(model.losses + [loss_name]))
    return loss_gradients
Esempio n. 33
0
def add_rpn_losses(model, time_dim=1):
    """Add single-scale RPN classification and box-regression losses.

    Blobs that already exist (e.g. produced by an earlier call) are not
    narrowed again; ``time_dim`` divides the bbox-loss scale for clips.
    """
    # Spatially narrow the full-sized RPN label arrays so they match the
    # feature-map shape, unless they have already been produced.
    if not model.BlobExists('rpn_labels_int32'):
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32')
    for key in ('targets', 'inside_weights', 'outside_weights'):
        narrowed = 'rpn_bbox_' + key
        if model.BlobExists(narrowed):
            continue
        model.net.SpatialNarrowAs(
            [narrowed + '_wide', 'rpn_bbox_pred'], narrowed)
    loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
        ['rpn_cls_logits', 'rpn_labels_int32'], 'loss_rpn_cls',
        scale=1. / cfg.NUM_GPUS)
    loss_rpn_bbox = model.net.SmoothL1Loss(
        ['rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
         'rpn_bbox_outside_weights'],
        'loss_rpn_bbox',
        beta=1. / 9.,
        scale=1. / cfg.NUM_GPUS / time_dim)
    loss_gradients = blob_utils.get_loss_gradients(
        model, [loss_rpn_cls, loss_rpn_bbox])
    model.losses = list(set(model.losses + ['loss_rpn_cls', 'loss_rpn_bbox']))
    return loss_gradients
Esempio n. 34
0
def create_model(model):
    """Build the VGG-16 softmaxed-triplet relationship model.

    Wires the shared conv body, the sbj/obj/rel heads, the embedding
    branches and (at training time) the triplet losses. Returns the loss
    gradients when training, otherwise None.
    """
    logger.info(' | VGG-16 yall softmaxed triplet {}'.format(cfg.DATASET))

    model.loss_set = []

    # 1. visual modules
    blob, dim, spatial_scale = add_VGG16_conv5_body(model)

    # sbj and obj always share their branches
    blob_sbj, dim_sbj, spatial_scale_sbj = add_VGG16_roi_fc_head_labeled_shared(
        model, 'sbj', blob, dim, spatial_scale)
    blob_obj, dim_obj, spatial_scale_obj = add_VGG16_roi_fc_head_labeled_shared(
        model, 'obj', blob, dim, spatial_scale)

    blob_rel_prd, dim_rel_prd, \
        blob_rel_sbj, dim_rel_sbj, \
        blob_rel_obj, dim_rel_obj, \
        spatial_scale_rel = add_VGG16_roi_fc_head_rel_spo_late_fusion(
            model, blob, dim, spatial_scale)

    add_visual_embedding(model, blob_sbj, dim_sbj, blob_obj, dim_obj,
                         blob_rel_prd, dim_rel_prd, blob_rel_sbj, dim_rel_sbj,
                         blob_rel_obj, dim_rel_obj)

    add_embd_fusion_for_p(model)

    add_language_embedding_for_vocab(model)

    branches = ('sbj', 'obj', 'rel')

    # During testing, get topk labels and scores
    if not model.train:
        for branch in branches:
            add_labels_and_scores_topk(model, branch)

    # 2. language modules and losses
    if model.train:
        add_language_embedding_for_gt(model)

        for branch in branches:
            add_embd_pos_neg_splits(model, branch)

        # Define several helper blobs: one margin constant per branch
        # (sbj/obj share MARGIN_SO, rel uses MARGIN_P) plus a constant 1.0.
        margins = {
            'sbj': cfg.TRAIN.MARGIN_SO,
            'obj': cfg.TRAIN.MARGIN_SO,
            'rel': cfg.TRAIN.MARGIN_P,
        }
        for branch in branches:
            model.net.ConstantFill([],
                                   'margin_blob_' + branch,
                                   shape=[1],
                                   value=margins[branch])
        model.net.ConstantFill([], 'one_blob', shape=[1], value=1.0)

        for branch in branches:
            add_embd_triplet_losses_labeled(model, branch)

    loss_gradients = blob_utils.get_loss_gradients(model, model.loss_set)
    model.AddLosses(model.loss_set)
    return loss_gradients if model.train else None
Esempio n. 35
0
def add_mask_rcnn_losses(model, blob_mask):
    """Add Mask R-CNN specific losses.

    Selects one of several mask-loss variants via cfg flags; when
    INDICATOR_LOSS_ON is set, an indicator loss is added alongside the
    encouragement mask loss. Returns the loss-gradient mapping.
    """
    mask_inputs = [blob_mask, 'masks_int32']
    if not cfg.MODEL.INDICATOR_LOSS_ON:
        mask_scale = 1. / cfg.NUM_GPUS * cfg.MRCNN.WEIGHT_LOSS_MASK
        if cfg.MODEL.PIXEL_FOCAL_LOSS_ON:
            # Pixel-level focal sigmoid cross-entropy loss.
            loss_mask = model.net.MaskSigmoidFocalLoss(
                mask_inputs,
                'loss_mask',
                scale=mask_scale,
                gamma=cfg.PIXEL_FOCAL_LOSS.LOSS_GAMMA)
        elif cfg.MODEL.WEIGHTED_SIGMOID_LOSS_ON:
            # Class-weighted sigmoid cross-entropy loss.
            loss_mask = model.net.WeightedSigmoidCrossEntropyLoss(
                mask_inputs,
                'loss_mask',
                pos_weight=cfg.WEIGHTED_SIGMOID_LOSS.POSITIVE_WEIGHT,
                neg_weight=cfg.WEIGHTED_SIGMOID_LOSS.NEGATIVE_WEIGHT,
                scale=mask_scale)
        else:
            # Plain sigmoid cross-entropy loss.
            loss_mask = model.net.SigmoidCrossEntropyLoss(
                mask_inputs, 'loss_mask', scale=mask_scale)
        loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
        model.AddLosses('loss_mask')
    else:
        # Indicator-loss mode: an encouragement term plus an indicator term.
        loss_mask = model.net.SigmoidCrossEntropyLoss(
            mask_inputs,
            'loss_mask',
            scale=1. / cfg.NUM_GPUS * cfg.REFINENET.WEIGHT_LOSS_ENCOURAGE)
        if cfg.MODEL.INDICATOR_HINGLE_LOSS_ON:
            # Hinge-style indicator loss bounded by two thresholds.
            loss_indicator = model.net.ThresholdSigmoidHingleLoss(
                mask_inputs,
                'loss_indicator',
                scale=1. / cfg.NUM_GPUS,
                low_threshold=(
                    cfg.REFINENET.INDICATOR_HINGLE_LOSS_LOW_THRESHOLD),
                high_threshold=(
                    cfg.REFINENET.INDICATOR_HINGLE_LOSS_HIGH_THRESHOLD))
        elif cfg.MODEL.INDICATOR_NEGATIVE_SIGMOID_LOSS_ON:
            # Negative sigmoid cross-entropy indicator loss.
            loss_indicator = model.net.NegativeSigmoidCrossEntropyLoss(
                mask_inputs, 'loss_indicator', scale=1. / cfg.NUM_GPUS)
        else:
            # Thresholded sigmoid cross-entropy indicator loss.
            loss_indicator = model.net.ThresholdSigmoidCrossEntropyLoss(
                mask_inputs,
                'loss_indicator',
                scale=1. / cfg.NUM_GPUS,
                threshold=cfg.REFINENET.INDICATOR_LOSS_THRESHOLD)
        loss_gradients = blob_utils.get_loss_gradients(
            model, [loss_mask, loss_indicator])
        model.AddLosses(['loss_mask', 'loss_indicator'])

    return loss_gradients
Esempio n. 36
0
def add_fpn_retinanet_losses(model):
    """Add RetinaNet losses: per-level box regression plus focal
    classification, registered together at the end. Returns the
    loss-gradient mapping."""
    loss_gradients = {}
    gradients, losses = [], []

    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
    levels = range(k_min, k_max + 1)

    model.AddMetrics(['retnet_fg_num', 'retnet_bg_num'])
    # bbox regression loss - SelectSmoothL1Loss handles multiple anchors
    # at a single spatial location.
    for lvl in levels:
        suffix = 'fpn{}'.format(lvl)
        bbox_loss = model.net.SelectSmoothL1Loss(
            ['retnet_bbox_pred_' + suffix,
             'retnet_roi_bbox_targets_' + suffix,
             'retnet_roi_fg_bbox_locs_' + suffix,
             'retnet_fg_num'],
            'retnet_loss_bbox_' + suffix,
            beta=cfg.RETINANET.BBOX_REG_BETA,
            scale=1. / cfg.NUM_GPUS * cfg.RETINANET.BBOX_REG_WEIGHT)
        gradients.append(bbox_loss)
        losses.append('retnet_loss_bbox_' + suffix)

    # cls loss - softmax or sigmoid focal loss depending on config.
    for lvl in levels:
        suffix = 'fpn{}'.format(lvl)
        cls_lvl_logits = 'retnet_cls_pred_' + suffix
        fl_name = 'fl_{}'.format(suffix)
        if cfg.RETINANET.SOFTMAX:
            # The softmax variant additionally emits the gated probability
            # blob (unused here beyond being materialized in the net).
            cls_focal_loss, _ = model.net.SoftmaxFocalLoss(
                [cls_lvl_logits, 'retnet_cls_labels_' + suffix,
                 'retnet_fg_num'],
                [fl_name, 'retnet_prob_{}'.format(suffix)],
                gamma=cfg.RETINANET.LOSS_GAMMA,
                alpha=cfg.RETINANET.LOSS_ALPHA,
                scale=(1. / cfg.NUM_GPUS),
            )
        else:
            cls_focal_loss = model.net.SigmoidFocalLoss(
                [cls_lvl_logits, 'retnet_cls_labels_' + suffix,
                 'retnet_fg_num'],
                [fl_name],
                gamma=cfg.RETINANET.LOSS_GAMMA,
                alpha=cfg.RETINANET.LOSS_ALPHA,
                scale=(1. / cfg.NUM_GPUS),
            )
        gradients.append(cls_focal_loss)
        losses.append(fl_name)

    loss_gradients.update(blob_utils.get_loss_gradients(model, gradients))
    model.AddLosses(losses)
    return loss_gradients
Esempio n. 37
0
def add_fpn_focal_losses(model):
    """Add RPN on FPN specific losses, using a sigmoid focal loss for the
    objectness classification.

    For every pyramid level: spatially narrow the wide label/target blobs
    to the feature-map extent, add a SelectSmoothL1 box-regression loss,
    then add a SigmoidFocalLoss classification loss. Returns a dict mapping
    loss blobs to their gradient blobs.
    """
    loss_gradients = {}
    gradients, losses = [], []

    # ==========================================================================
    # bbox regression loss - SelectSmoothL1Loss for multiple anchors at a location
    # ==========================================================================
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        slvl = str(lvl)
        # Spatially narrow the full-sized RPN label arrays to match the
        # feature map shape.
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide_fpn' + slvl, 'rpn_cls_logits_fpn' + slvl],
            'rpn_labels_int32_fpn' + slvl)
        for key in ('targets', 'inside_weights', 'outside_weights'):
            model.net.SpatialNarrowAs([
                'rpn_bbox_' + key + '_wide_fpn' + slvl,
                'rpn_bbox_pred_fpn' + slvl
            ], 'rpn_bbox_' + key + '_fpn' + slvl)
        # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
        # handled by (1) setting bbox outside weights and (2) SmoothL1Loss
        # normalizes by IMS_PER_BATCH
        loss_rpn_bbox_fpn = model.net.SelectSmoothL1Loss(
            [
                'rpn_bbox_pred_fpn' + slvl, 'rpn_bbox_targets_fpn' + slvl,
                'rpn_bbox_inside_weights_fpn' + slvl,
                'rpn_bbox_outside_weights_fpn' + slvl
            ],
            'loss_rpn_bbox_fpn' + slvl,
            beta=1. / 9.,
            scale=model.GetLossScale(),
        )
        gradients.append(loss_rpn_bbox_fpn)
        losses.append('loss_rpn_bbox_fpn' + slvl)
    # ==========================================================================
    # cls loss - sigmoid focal loss on the narrowed objectness labels
    # ==========================================================================
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        slvl = str(lvl)
        loss_rpn_cls_fpn = model.net.SigmoidFocalLoss(
            [
                'rpn_cls_logits_fpn' + slvl, 'rpn_labels_int32_fpn' + slvl,
                'rpn_cls_probs_fpn' + slvl
            ], ['loss_rpn_cls_fpn' + slvl],
            gamma=cfg.FOCAL_LOSS.LOSS_GAMMA,
            alpha=cfg.FOCAL_LOSS.LOSS_ALPHA,
            scale=(model.GetLossScale() / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                   cfg.TRAIN.IMS_PER_BATCH),
            num_classes=cfg.MODEL.NUM_CLASSES - 1)

        gradients.append(loss_rpn_cls_fpn)
        losses.append('loss_rpn_cls_fpn' + slvl)

    # BUG FIX: previously only [loss_rpn_cls_fpn, loss_rpn_bbox_fpn] -- the
    # blobs from the *last* loop iteration -- were registered, so every
    # pyramid level except the last produced no gradients. Register the
    # accumulated list instead (mirrors add_fpn_retinanet_losses).
    loss_gradients.update(blob_utils.get_loss_gradients(model, gradients))
    model.AddLosses(losses)
    return loss_gradients