Example #1
    def _single_gpu_build_func(model):
        """Builds the model on a single GPU. Can be called in a loop over GPUs
        with name and device scoping to create a data parallel model."""
        if model.train:
            # Build the teacher network in inference mode under the teacher name
            # scope; its outputs supervise the student via the distillation loss
            # added below.
            train = model.train
            switch_to_teacher()
            model.train = False
            init_params = model.init_params

            with c2_utils.NamedTeacherScope():
                blobs, dim, spatial_scales = get_func(cfg.MODEL.CONV_BODY)(model)
                retinanet_heads.add_fpn_retinanet_outputs(
                    model, blobs, dim, spatial_scales
                )

            # Restore the training flag and switch back to building the student.
            model.train = train
            model.init_params = init_params
            switch_to_student()

        blobs, dim, spatial_scales = add_conv_body_func(model)
        if not model.train:
            model.conv_body_net = model.net.Clone('conv_body_net')
        retinanet_heads.add_fpn_retinanet_outputs(
            model, blobs, dim, spatial_scales
        )
        if model.train:
            loss_gradients = retinanet_heads.add_fpn_retinanet_losses(
                model
            )
            loss_gradients_distill = retinanet_heads.add_distill_loss(
                model, '', 'teacher/'
            )
            loss_gradients.update(loss_gradients_distill)

        return loss_gradients if model.train else None
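The docstring above refers to calling this builder in a loop over GPUs. As a rough illustration, here is a simplified sketch of such a wrapper, reusing the cfg and c2_utils names already referenced in the example; it assumes c2_utils exposes Detectron's NamedCudaScope helper, and a real wrapper (such as Detectron's optim.build_data_parallel_model) would also add gradient and parameter-update operators:

    def build_data_parallel_model(model, single_gpu_build_func):
        # Replicate the single-GPU graph once per device. Name/device scoping
        # keeps each replica's blobs separate (e.g. 'gpu_0/...', 'gpu_1/...').
        all_loss_gradients = {}
        for gpu_id in range(cfg.NUM_GPUS):
            with c2_utils.NamedCudaScope(gpu_id):
                loss_gradients = single_gpu_build_func(model)
                if model.train:
                    all_loss_gradients.update(loss_gradients)
        return all_loss_gradients if model.train else None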
Example #2
def _single_gpu_build_func(model):
    """Builds the model on a single GPU. Can be called in a loop over GPUs
    with name and device scoping to create a data parallel model."""
    blobs, dim, spatial_scales = add_conv_body_func(model)
    retinanet_heads.add_fpn_retinanet_outputs(model, blobs, dim,
                                              spatial_scales)
    if model.train:
        loss_gradients = retinanet_heads.add_fpn_retinanet_losses(model)
    return loss_gradients if model.train else None
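At inference time the builder returns None and detections are read back from the Caffe2 workspace after the net has been run. A rough sketch of that pattern, assuming Detectron's FPN-level blob naming (retnet_cls_prob_fpn<lvl>, retnet_bbox_pred_fpn<lvl>); treat the exact blob names as project-specific:

    from caffe2.python import core, workspace

    def fetch_retinanet_outputs(min_level, max_level):
        # After model.net has been run on an image, pull the per-level class
        # probabilities and box regressions out of the workspace. The blob
        # names below are assumed to follow Detectron's RetinaNet convention.
        outputs = {}
        for lvl in range(min_level, max_level + 1):
            cls_prob = workspace.FetchBlob(
                core.ScopedName('retnet_cls_prob_fpn%d' % lvl))
            box_pred = workspace.FetchBlob(
                core.ScopedName('retnet_bbox_pred_fpn%d' % lvl))
            outputs[lvl] = (cls_prob, box_pred)
        return outputs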
Example #3
def _single_gpu_build_func(model):
    """Builds the model on a single GPU. Can be called in a loop over GPUs
    with name and device scoping to create a data parallel model."""
    blobs, dim, spatial_scales = add_conv_body_func(model)
    retinanet_heads.add_fpn_retinanet_outputs(
        model, blobs, dim, spatial_scales
    )
    if model.train:
        loss_gradients = retinanet_heads.add_fpn_retinanet_losses(
            model
        )
    return loss_gradients if model.train else None
Example #4
    def _forward(self, data, im_info, roidb=None, **rpn_kwargs):
        im_data = data
        if self.training:
            roidb = list(map(lambda x: blob_utils.deserialize(x)[0], roidb))

        device_id = im_data.get_device()

        return_dict = {}  # A dict to collect return variables

        blob_conv = self.Conv_Body(im_data)

        # if self.training:
        #     # can be used to infer fg/bg ratio
        #     return_dict['rois_label'] = rpn_ret['labels_int32']

        if cfg.RPN.RPN_ON:
            rpn_ret = self.RPN(blob_conv, im_info, roidb)

        if cfg.FPN.FPN_ON and cfg.FAST_RCNN.ROI_BOX_HEAD != '':
            # Retain only the blobs that will be used for RoI heads. `blob_conv` may include
            # extra blobs that are used for RPN proposals, but not for RoI
            # heads.
            blob_conv = blob_conv[-self.num_roi_levels:]

        if not self.training:
            return_dict['blob_conv'] = blob_conv

        if not cfg.MODEL.RPN_ONLY:
            if cfg.FAST_RCNN.ROI_BOX_HEAD != '':
                if cfg.MODEL.SHARE_RES5 and self.training:
                    box_feat, res5_feat = self.Box_Head(blob_conv, rpn_ret)
                else:
                    box_feat = self.Box_Head(blob_conv, rpn_ret)
            else:
                box_feat = blob_conv
            cls_score, bbox_pred = self.Box_Outs(box_feat)
        else:
            # TODO: complete the returns for RPN only situation
            pass

        if self.training:
            return_dict['losses'] = {}
            return_dict['metrics'] = {}

            if cfg.FAST_RCNN.ROI_BOX_HEAD != '':
                # rpn loss
                rpn_kwargs.update(dict((k, rpn_ret[k]) for k in rpn_ret.keys() if (
                    k.startswith('rpn_cls_logits') or k.startswith('rpn_bbox_pred'))))
                loss_rpn_cls, loss_rpn_bbox = rpn_heads.generic_rpn_losses(
                    **rpn_kwargs)
                if cfg.FPN.FPN_ON:
                    for i, lvl in enumerate(
                            range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1)):
                        return_dict['losses']['loss_rpn_cls_fpn%d' % lvl] = loss_rpn_cls[i]
                        return_dict['losses']['loss_rpn_bbox_fpn%d' % lvl] = loss_rpn_bbox[i]
                else:
                    return_dict['losses']['loss_rpn_cls'] = loss_rpn_cls
                    return_dict['losses']['loss_rpn_bbox'] = loss_rpn_bbox

                # bbox loss
                loss_cls, loss_bbox, accuracy_cls = fast_rcnn_heads.fast_rcnn_losses(
                    cls_score, bbox_pred, rpn_ret['labels_int32'], rpn_ret['bbox_targets'],
                    rpn_ret['bbox_inside_weights'], rpn_ret['bbox_outside_weights'])
                return_dict['losses']['loss_cls'] = loss_cls
                return_dict['losses']['loss_bbox'] = loss_bbox
                return_dict['metrics']['accuracy_cls'] = accuracy_cls

            if cfg.RETINANET.RETINANET_ON:
                loss_retnet_cls, loss_retnet_bbox = retinanet_heads.add_fpn_retinanet_losses(
                    cls_score, bbox_pred, **rpn_kwargs)
                for i, lvl in enumerate(
                        range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1)):
                    return_dict['losses']['loss_retnet_cls_fpn%d' % lvl] = loss_retnet_cls[i]
                    return_dict['losses']['loss_retnet_bbox_fpn%d' % lvl] = loss_retnet_bbox[i]

            if cfg.MODEL.MASK_ON:
                if getattr(self.Mask_Head, 'SHARE_RES5', False):
                    mask_feat = self.Mask_Head(
                        res5_feat, rpn_ret, roi_has_mask_int32=rpn_ret['roi_has_mask_int32'])
                else:
                    mask_feat = self.Mask_Head(blob_conv, rpn_ret)
                mask_pred = self.Mask_Outs(mask_feat)
                # return_dict['mask_pred'] = mask_pred
                # mask loss
                loss_mask = mask_rcnn_heads.mask_rcnn_losses(
                    mask_pred, rpn_ret['masks_int32'])
                return_dict['losses']['loss_mask'] = loss_mask

            if cfg.MODEL.KEYPOINTS_ON:
                if getattr(self.Keypoint_Head, 'SHARE_RES5', False):
                    # No corresponding keypoint head is implemented yet (not in
                    # Detectron either). The RPN also needs to generate the
                    # label 'roi_has_keypoints_int32'.
                    kps_feat = self.Keypoint_Head(
                        res5_feat, rpn_ret, roi_has_keypoints_int32=rpn_ret['roi_has_keypoint_int32'])
                else:
                    kps_feat = self.Keypoint_Head(blob_conv, rpn_ret)
                kps_pred = self.Keypoint_Outs(kps_feat)
                # return_dict['keypoints_pred'] = kps_pred
                # keypoints loss
                if cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
                    loss_keypoints = keypoint_rcnn_heads.keypoint_losses(
                        kps_pred, rpn_ret['keypoint_locations_int32'], rpn_ret['keypoint_weights'])
                else:
                    loss_keypoints = keypoint_rcnn_heads.keypoint_losses(
                        kps_pred, rpn_ret['keypoint_locations_int32'], rpn_ret['keypoint_weights'],
                        rpn_ret['keypoint_loss_normalizer'])
                return_dict['losses']['loss_kps'] = loss_keypoints

            # Work around a PyTorch 0.4 bug when gathering scalar (0-dim) tensors.
            for k, v in return_dict['losses'].items():
                return_dict['losses'][k] = v.unsqueeze(0)
            for k, v in return_dict['metrics'].items():
                return_dict['metrics'][k] = v.unsqueeze(0)

        else:
            # Testing
            if cfg.FAST_RCNN.ROI_BOX_HEAD != '':
                return_dict['rois'] = rpn_ret['rois']
            return_dict['cls_score'] = cls_score
            return_dict['bbox_pred'] = bbox_pred

        return return_dict
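For context, a minimal sketch of how a training step might consume this return dict when the model is wrapped in torch.nn.DataParallel (which is why each loss is unsqueezed to 1-D above). The train_step name and the inputs dict are placeholders, not part of the original code, and the public forward is assumed to delegate to _forward as in Detectron.pytorch:

    def train_step(model, optimizer, inputs):
        # 'inputs' is a dict of blobs from the data loader (data, im_info,
        # roidb, plus any RPN target blobs passed through **rpn_kwargs).
        outputs = model(**inputs)
        # Each loss was unsqueezed to 1-D so DataParallel can gather it;
        # average the per-GPU entries, then sum into a single scalar.
        total_loss = sum(v.mean() for v in outputs['losses'].values())
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        return total_loss.item()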