def init_param(self, model_config):
        """Pull RPN settings out of ``model_config`` and build helper objects."""
        cfg = model_config

        # proposal generation / NMS settings
        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']

        # loss-related settings
        self.num_cls_samples = cfg['num_cls_samples']
        self.num_reg_samples = cfg['num_reg_samples']
        self.use_focal_loss = cfg['use_focal_loss']
        # optional flag; None when the key is absent
        self.use_iou = cfg.get('use_iou')

        # hard-example sampler drawing everything from the foreground pool
        self.sampler = DetectionSampler({"fg_fraction": 1.0})

        # anchors and the per-location output widths derived from them
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assigner and its box coder
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder
    def init_param(self, model_config):
        """Read RCNN head settings from ``model_config`` and build helpers."""
        self.classes = model_config['classes']
        self.n_classes = len(self.classes)
        self.class_agnostic = model_config['class_agnostic']
        self.pooling_size = model_config['pooling_size']
        self.pooling_mode = model_config['pooling_mode']
        self.crop_resize_with_max_pool = model_config[
            'crop_resize_with_max_pool']
        self.truncated = model_config['truncated']
        # optional flag; None when the key is absent
        self.use_self_attention = model_config.get('use_self_attention')

        self.use_focal_loss = model_config['use_focal_loss']
        self.subsample_twice = model_config['subsample_twice']
        self.rcnn_batch_size = model_config['rcnn_batch_size']

        # configs forwarded to submodules
        self.feature_extractor_config = model_config[
            'feature_extractor_config']
        self.rpn_config = model_config['rpn_config']

        # proposal/gt matching
        self.target_assigner = TargetAssigner(
            model_config['target_assigner_config'])

        # hard-example sampler drawing everything from the foreground pool
        self.sampler = DetectionSampler({'fg_fraction': 1})
# ---- Example #3 ----
    def init_param(self, model_config):
        """Read LED-RPN settings from ``model_config`` and build helpers."""
        cfg = model_config

        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']
        self.rpn_batch_size = cfg['rpn_batch_size']
        self.use_focal_loss = cfg['use_focal_loss']
        self.iou_criterion = cfg['iou_criterion']
        self.use_iox = cfg['use_iox']

        # fixed loss-shaping constants
        self.theta = 1.0
        self.alpha = 0.6

        # hard-example sampler
        self.sampler = DetectionSampler(cfg['sampler_config'])

        # anchors and the per-location output widths derived from them
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # LED target assigner and its box coder
        self.target_assigner = LEDTargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder
# ---- Example #4 ----
    def init_param(self, model_config):
        """Read RPN settings (with optional window filtering) from config."""
        cfg = model_config

        # optional behaviors, with defaults
        self.outside_window_filter = cfg.get('outside_window_filter', False)
        self.clip_boxes = cfg.get('clip_boxes', True)
        self.use_iou = cfg.get('use_iou')

        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']
        self.rpn_batch_size = cfg['rpn_batch_size']
        self.use_focal_loss = cfg['use_focal_loss']

        # hard-example sampler
        self.sampler = DetectionSampler(cfg['sampler_config'])

        # anchors and the per-location output widths derived from them
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assigner and its box coder
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder
    def init_param(self, model_config):
        """Read LED-RCNN head settings and pick a sampler by criterion."""
        self.classes = model_config['classes']
        self.n_classes = len(self.classes)
        self.class_agnostic = model_config['class_agnostic']
        self.pooling_size = model_config['pooling_size']
        self.pooling_mode = model_config['pooling_mode']
        self.crop_resize_with_max_pool = model_config[
            'crop_resize_with_max_pool']
        self.truncated = model_config['truncated']

        # fixed loss-shaping constants
        self.theta = 1.0
        self.alpha = 0.6

        self.use_focal_loss = model_config['use_focal_loss']
        self.subsample_twice = model_config['subsample_twice']
        self.rcnn_batch_size = model_config['rcnn_batch_size']
        self.iou_criterion = model_config['iou_criterion']
        self.use_iox = model_config['use_iox']

        # configs forwarded to submodules
        self.feature_extractor_config = model_config[
            'feature_extractor_config']
        self.rpn_config = model_config['rpn_config']

        # LED target assigner
        self.target_assigner = LEDTargetAssigner(
            model_config['target_assigner_config'])

        # sampler choice depends on whether the IoU criterion is active
        sampler_cls = (DetectionSampler
                       if self.iou_criterion else BalancedSampler)
        self.sampler = sampler_cls(model_config['sampler_config'])
# ---- Example #6 ----
    def init_param(self, model_config):
        """Read multibox (SSD-style) settings from ``model_config``."""
        cfg = model_config

        self.feature_extractor_config = cfg['feature_extractor_config']
        # boxes per location for each source feature map (hard-coded here)
        self.multibox_cfg = [3, 3, 3, 3, 3, 3]
        self.n_classes = len(cfg['classes'])
        self.batch_size = cfg['batch_size']
        self.use_focal_loss = cfg['use_focal_loss']

        # sampler, assigner, anchors
        self.sampler = DetectionSampler(cfg['sampler_config'])
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])

        self.bbox_coder = self.target_assigner.bbox_coder
# ---- Example #7 ----
    def init_param(self, model_config):
        """Read OFT-style detector settings and build its sub-components."""
        cfg = model_config

        self.feat_size = cfg['common_feat_size']
        self.batch_size = cfg['batch_size']
        self.sample_size = cfg['sample_size']
        self.pooling_size = cfg['pooling_size']
        self.n_classes = cfg['num_classes']
        self.use_focal_loss = cfg['use_focal_loss']
        self.feature_extractor_config = cfg['feature_extractor_config']

        # voxel grid over the scene
        self.voxel_generator = VoxelGenerator(
            cfg['voxel_generator_config'])
        self.voxel_generator.init_voxels()

        self.integral_map_generator = IntegralMapGenerator()

        # training-time (OFT) assigner and a separate eval-time assigner
        self.oft_target_assigner = OFTargetAssigner(
            cfg['target_assigner_config'])
        self.target_assigner = TargetAssigner(
            cfg['eval_target_assigner_config'])
        self.target_assigner.analyzer.append_gt = False

        self.sampler = DetectionSampler(cfg['sampler_config'])

        self.bbox_coder = self.oft_target_assigner.bbox_coder

        # profiler used to find the most expensive operators
        self.profiler = Profiler()

        # orientation encoded with multiple angle bins
        self.num_bins = cfg['num_bins']
        # pos(3) + dim(3) + 4 values per angle bin
        self.reg_channels = 3 + 3 + self.num_bins * 4

        # rcnn outputs: score, pos, dim, ang
        self.rcnn_output_channels = self.n_classes + self.reg_channels
        # rpn outputs: objectness(2) + pos(3) + dim(3)
        self.rpn_output_channels = 2 + 3 + 3

        # explicit None check so a configured None still falls back to 1
        nms_deltas = cfg.get('nms_deltas')
        self.nms_deltas = 1 if nms_deltas is None else nms_deltas
class OrgOHEMThreeIoUFasterRCNN(Model):
    def forward(self, feed_dict):
        """Run backbone, RPN, and the regression-only RCNN head.

        Args:
            feed_dict: dict with at least 'img'; 'base_feat' is added here.

        Returns:
            prediction_dict containing the RPN outputs plus
            'rcnn_bbox_preds' (one 4-vector per RoI, obtained by spatially
            averaging the conv head output), 'rcnn_cls_scores', and
            'second_rpn_anchors' mapping kept proposals back to anchors.

        NOTE(review): the softmax over cls scores and the training-time
        pre-subsampling are commented out below, so this head effectively
        trains on regression only — confirm that this is intentional.
        """
        # import ipdb
        # ipdb.set_trace()

        prediction_dict = {}

        # base model: first-stage backbone features
        base_feat = self.feature_extractor.first_stage_feature(
            feed_dict['img'])
        feed_dict.update({'base_feat': base_feat})
        # batch_size = base_feat.shape[0]

        # rpn model produces proposals / rois
        prediction_dict.update(self.rpn_model.forward(feed_dict))

        # proposals = prediction_dict['proposals_batch']
        # shape(N,num_proposals,5)
        # pre subsample for reduce consume of memory
        # if self.training:
        # self.pre_subsample(prediction_dict, feed_dict)
        rois_batch = prediction_dict['rois_batch']

        # note here base_feat (N,C,H,W),rois_batch (N,num_proposals,5)
        pooled_feat = self.rcnn_pooling(base_feat, rois_batch.view(-1, 5))

        # shape(N,C,1,1)
        pooled_feat = self.feature_extractor.second_stage_feature(pooled_feat)
        ########################################
        # semantic map
        ########################################
        # cls branch: global-average-pool then a linear head
        pooled_feat_cls = pooled_feat.mean(3).mean(2)
        rcnn_cls_scores = self.rcnn_cls_pred(pooled_feat_cls)
        # rcnn_cls_scores = rcnn_cls_scores_map.mean(3).mean(2)
        # saliency_map = F.softmax(rcnn_cls_scores_map, dim=1)
        #  rcnn_cls_probs = F.softmax(rcnn_cls_scores, dim=1)
        # shape(N,C)
        # rcnn_bbox_feat = pooled_feat * saliency_map[:, 1:, :, :]
        # rcnn_bbox_feat = rcnn_bbox_feat.mean(3).mean(2)

        # self attention
        # import ipdb
        # ipdb.set_trace()
        # if self.use_self_attention:
        # channel_attention = self.generate_channel_attention(pooled_feat)
        # spatial_attention = self.generate_spatial_attention(pooled_feat)
        # pooled_feat_reg = pooled_feat * channel_attention
        # pooled_feat_reg = pooled_feat * spatial_attention
        # pooled_feat_reg = pooled_feat_reg.mean(3).mean(2)
        # rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat_reg)
        # else:
        # rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat_cls)
        # shape(N,C)
        # pooled_feat = pooled_feat.mean(3).mean(2)

        # rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat)
        # rcnn_cls_scores = self.rcnn_cls_pred(pooled_feat)

        # rcnn_cls_probs = F.softmax(rcnn_cls_scores, dim=1)

        rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat)
        # conv head output is (N,4,H,W); flatten to (N*4, H*W) and average
        # the spatial dimension, i.e. global-average-pool each channel
        rcnn_bbox_preds = rcnn_bbox_preds.view(rcnn_bbox_preds.shape[0] * 4,
                                               -1)
        rcnn_bbox_preds = rcnn_bbox_preds.mean(-1)

        # import ipdb
        # ipdb.set_trace()
        # select min of abs value
        # rcnn_bbox_preds = torch.abs(rcnn_bbox_preds)
        # _, min_idx = torch.min(rcnn_bbox_preds, dim=-1)
        # row_idx = torch.arange(min_idx.numel()).type_as(min_idx)
        # rcnn_bbox_preds = rcnn_bbox_preds[row_idx, min_idx]
        # back to (num_rois, 4)
        rcnn_bbox_preds = rcnn_bbox_preds.view(-1, 4)
        # rcnn_bbox_preds = rcnn_bbox_preds.max(3)[0].max(2)[0]

        #  prediction_dict['rcnn_cls_probs'] = rcnn_cls_probs
        prediction_dict['rcnn_bbox_preds'] = rcnn_bbox_preds
        prediction_dict['rcnn_cls_scores'] = rcnn_cls_scores

        # used for tracking proposals back to the anchors they came from
        proposals_order = prediction_dict['proposals_order']
        prediction_dict['second_rpn_anchors'] = prediction_dict['anchors'][0][
            proposals_order]

        return prediction_dict

    #  def unloaded_parameters(self):
    #  return ['rcnn_cls_pred.bias', 'rcnn_cls_pred.weight']
    def generate_channel_attention(self, feat):
        """Global-average-pool ``feat`` over W then H, keeping singleton dims."""
        pooled_w = feat.mean(3, keepdim=True)
        return pooled_w.mean(2, keepdim=True)

    def generate_spatial_attention(self, feat):
        """Project ``feat`` through the spatial-attention conv layer."""
        attention_layer = self.spatial_attention
        return attention_layer(feat)

    def init_weights(self):
        """Initialize submodules, then the two RCNN prediction heads."""
        # delegate to submodules first
        for submodule in (self.feature_extractor, self.rpn_model):
            submodule.init_weights()

        # zero-mean gaussian init; the bbox head uses a tighter std
        Filler.normal_init(self.rcnn_cls_pred, 0, 0.01, self.truncated)
        Filler.normal_init(self.rcnn_bbox_pred, 0, 0.001, self.truncated)

    def init_modules(self):
        """Build backbone, RPN, RoI pooling, the RCNN heads and losses.

        NOTE(review): despite its name, ``rcnn_cls_pred`` is trained with
        ``nn.MSELoss`` below (the cross-entropy/focal variants are commented
        out), so its output is regressed rather than softmaxed — confirm.
        """
        self.feature_extractor = ResNetFeatureExtractor(
            self.feature_extractor_config)
        self.rpn_model = RPNModel(self.rpn_config)
        # RoI-align at 1/16 of the input resolution
        self.rcnn_pooling = RoIAlignAvg(self.pooling_size, self.pooling_size,
                                        1.0 / 16.0)
        # self.rcnn_cls_pred = nn.Conv2d(2048, self.n_classes, 3, 1, 1)
        self.rcnn_cls_pred = nn.Linear(2048, self.n_classes)
        # conv bbox head: 4 offsets predicted per spatial location
        self.rcnn_bbox_pred = nn.Conv2d(2048, 4, 3, 1, 1)
        # if self.class_agnostic:
        # self.rcnn_bbox_pred = nn.Linear(2048, 4)
        # else:
        # self.rcnn_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        # loss module
        # if self.use_focal_loss:
        # self.rcnn_cls_loss = FocalLoss(2)
        # else:
        # self.rcnn_cls_loss = functools.partial(
        # F.cross_entropy, reduce=False)
        # per-element losses; reduction is done manually in loss()
        self.rcnn_cls_loss = nn.MSELoss(reduce=False)

        self.rcnn_bbox_loss = nn.modules.SmoothL1Loss(reduce=False)

        # attention
        self.spatial_attention = nn.Conv2d(2048, 1, 3, 1, 1)

    def init_param(self, model_config):
        """Configure the OHEM-IoU RCNN head from ``model_config``."""
        cfg = model_config

        self.classes = cfg['classes']
        self.n_classes = len(self.classes)
        self.class_agnostic = cfg['class_agnostic']
        self.pooling_size = cfg['pooling_size']
        self.pooling_mode = cfg['pooling_mode']
        self.crop_resize_with_max_pool = cfg[
            'crop_resize_with_max_pool']
        self.truncated = cfg['truncated']
        # optional flag; None when the key is absent
        self.use_self_attention = cfg.get('use_self_attention')

        self.use_focal_loss = cfg['use_focal_loss']
        self.subsample_twice = cfg['subsample_twice']
        self.rcnn_batch_size = cfg['rcnn_batch_size']

        # configs forwarded to submodules
        self.feature_extractor_config = cfg[
            'feature_extractor_config']
        self.rpn_config = cfg['rpn_config']

        # proposal/gt matching
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])

        # hard-example sampler drawing everything from the foreground pool
        self.sampler = DetectionSampler({'fg_fraction': 1})

    def loss(self, prediction_dict, feed_dict):
        """
        assign proposals label and subsample from them
        Then calculate loss

        Only the bbox-regression loss is active here; the classification
        loss block is commented out.  Returns the RPN losses plus
        'rcnn_bbox_loss'.

        NOTE(review): indexing ``rcnn_reg_targets[0]`` and
        ``rcnn_reg_weights[0]`` assumes batch size 1 — confirm against the
        dataloader before running with larger batches.
        """
        # import ipdb
        # ipdb.set_trace()
        loss_dict = {}

        # submodule loss
        loss_dict.update(self.rpn_model.loss(prediction_dict, feed_dict))

        rois_batch = prediction_dict['rois_batch']
        gt_boxes = feed_dict['gt_boxes']
        gt_labels = feed_dict['gt_labels']

        ##########################
        # assigner
        ##########################
        rcnn_cls_targets, rcnn_reg_targets, rcnn_cls_weights, rcnn_reg_weights = self.target_assigner.assign(
            rois_batch[:, :, 1:], gt_boxes, gt_labels)

        # bounding box regression L1 loss
        rcnn_bbox_preds = prediction_dict['rcnn_bbox_preds']
        rcnn_bbox_loss = self.rcnn_bbox_loss(rcnn_bbox_preds,
                                             rcnn_reg_targets[0]).sum(dim=-1)
        # OHEM criterion: weighted per-roi regression loss
        reg_criterion = rcnn_reg_weights * rcnn_bbox_loss
        # reg_criterion = self.target_assigner.matcher.assigned_overlaps_batch

        # bbox subsample: keep a fixed-size batch of the hardest positives
        pos_indicator = rcnn_reg_weights > 0
        bbox_batch_sampled_mask = self.sampler.subsample_batch(
            self.rcnn_batch_size, pos_indicator, criterion=reg_criterion)

        rcnn_reg_weights *= bbox_batch_sampled_mask.type_as(rcnn_reg_weights)
        num_reg_coeff = (rcnn_reg_weights > 0).sum(dim=-1)
        assert num_reg_coeff, 'bug happens'
        # weight then normalize by the number of sampled positives
        rcnn_bbox_loss *= rcnn_reg_weights[0]
        rcnn_bbox_loss = rcnn_bbox_loss.sum(dim=-1) / num_reg_coeff.float()

        # classification loss
        #  rcnn_cls_scores = prediction_dict['rcnn_cls_probs'][:, 1]
        #  rcnn_cls_loss = self.rcnn_cls_loss(rcnn_cls_scores,
        #  rcnn_cls_targets[0])

        # cls subsample
        #  cls_criterion = rcnn_cls_loss * rcnn_cls_weights
        #  indicator = rcnn_cls_weights > 0
        #  pos_indicator = indicator
        #  cls_batch_sampled_mask = self.sampler.subsample_batch(
        #  self.rcnn_batch_size,
        #  pos_indicator,
        #  criterion=cls_criterion,
        #  indicator=indicator)

        #  cls_batch_sampled_mask |= rcnn_reg_weights.type_as(
        #  cls_batch_sampled_mask)
        #  rcnn_cls_weights *= cls_batch_sampled_mask.type_as(rcnn_cls_weights)
        #  num_cls_coeff = (rcnn_cls_weights > 0).sum(dim=-1)
        #  assert num_cls_coeff, 'bug happens'
        #  rcnn_cls_loss *= rcnn_cls_weights[0]
        #  rcnn_cls_loss = rcnn_cls_loss.sum(dim=-1) / num_cls_coeff.float()

        # loss weights has no gradients
        #  loss_dict['rcnn_cls_loss'] = rcnn_cls_loss
        loss_dict['rcnn_bbox_loss'] = rcnn_bbox_loss

        prediction_dict['rcnn_reg_weights'] = rcnn_reg_weights

        return loss_dict
class RPNModel(Model):
    def init_param(self, model_config):
        """Configure the RPN from ``model_config`` and build its helpers."""
        cfg = model_config

        # proposal generation / NMS settings
        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']

        # loss-related settings
        self.num_cls_samples = cfg['num_cls_samples']
        self.num_reg_samples = cfg['num_reg_samples']
        self.use_focal_loss = cfg['use_focal_loss']
        # optional flag; None when the key is absent
        self.use_iou = cfg.get('use_iou')

        # hard-example sampler drawing everything from the foreground pool
        self.sampler = DetectionSampler({"fg_fraction": 1.0})

        # anchors and the per-location output widths derived from them
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assigner and its box coder
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder

    def init_weights(self):
        """Gaussian-initialize the three RPN conv layers (no truncation)."""
        self.truncated = False

        for layer in (self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_pred):
            Filler.normal_init(layer, 0, 0.01, self.truncated)

    def unfreeze_modules(self):
        """Re-enable gradients for the coarse/fine IoD/IoG/IoU map convs.

        NOTE(review): the ``rpn_*_map_conv_*`` layers are not created in
        this class's ``init_modules`` — presumably a subclass adds them;
        confirm before calling.
        """
        for level in ('coarse', 'fine'):
            for kind in ('iod', 'iog', 'iou'):
                conv = getattr(self, 'rpn_%s_map_conv_%s' % (level, kind))
                conv.bias.requires_grad = True
                conv.weight.requires_grad = True

    def init_modules(self):
        """Build the RPN layers and their losses.

        Layers: a 3x3 conv trunk, a 1x1 objectness head and a 1x1 bbox
        head.  When ``use_score`` is set, the two objectness channels are
        concatenated to the trunk features before bbox prediction, and the
        bbox head predicts one anchor's offsets at a time (the caller
        loops over anchors), so its output width is reduced accordingly.
        """
        # conv-relu trunk processing the input feature map
        self.rpn_conv = nn.Conv2d(self.in_channels, 512, 3, 1, 1, bias=True)

        # bg/fg classification score layer
        self.rpn_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # anchor box offset prediction layer
        if self.use_score:
            bbox_feat_channels = 512 + 2
            # one anchor at a time: 4 output channels instead of
            # 4 * num_anchors.  Integer division is required here — true
            # division would turn nc_bbox_out into a float and break
            # nn.Conv2d, which expects integer channel counts.
            self.nc_bbox_out //= self.num_anchors
        else:
            bbox_feat_channels = 512
        self.rpn_bbox_pred = nn.Conv2d(bbox_feat_channels, self.nc_bbox_out, 1,
                                       1, 0)

        # bbox regression loss (per-element; reduction handled in loss())
        self.rpn_bbox_loss = nn.modules.loss.SmoothL1Loss(reduce=False)

        # classification loss
        if self.use_focal_loss:
            self.rpn_cls_loss = FocalLoss(2)
        else:
            self.rpn_cls_loss = functools.partial(F.cross_entropy,
                                                  reduce=False)

    def generate_proposal(self, rpn_cls_probs, anchors, rpn_bbox_preds,
                          im_info):
        # TODO create a new Function
        """Decode, clip and NMS-filter RPN outputs into proposals.

        Args:
            rpn_cls_probs: FloatTensor, shape (N, 2*num_anchors, H, W).
            anchors: list whose first entry holds the anchors for the
                single supported feature map.
            rpn_bbox_preds: FloatTensor, shape (N, num_anchors*4, H, W).
            im_info: image size info used to clip boxes.

        Returns:
            proposals_batch: FloatTensor, shape (N, post_nms_topN, 4),
                zero-padded when fewer proposals survive NMS.
            proposals_order: LongTensor, shape (N, post_nms_topN), index of
                each kept proposal into the flattened anchor grid (-1 pad).
        """
        # assert len(
        # rpn_bbox_preds) == 1, 'just one feature maps is supported now'
        # rpn_bbox_preds = rpn_bbox_preds[0]
        anchors = anchors[0]
        # do not backward (no-op self-assignment kept from the original)
        anchors = anchors
        rpn_cls_probs = rpn_cls_probs.detach()
        rpn_bbox_preds = rpn_bbox_preds.detach()

        batch_size = rpn_bbox_preds.shape[0]
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(batch_size, -1, 4)
        # apply deltas to anchors to decode
        # loop here due to many features maps
        # proposals = []
        # for rpn_bbox_preds_single_map, anchors_single_map in zip(
        # rpn_bbox_preds, anchors):
        # proposals.append(
        # self.bbox_coder.decode(rpn_bbox_preds_single_map,
        # anchors_single_map))
        # proposals = torch.cat(proposals, dim=1)

        proposals = self.bbox_coder.decode_batch(rpn_bbox_preds, anchors)

        # filter and clip proposals to the image boundary
        proposals = box_ops.clip_boxes(proposals, im_info)

        # foreground probability: second half of the class channels
        fg_probs = rpn_cls_probs[:, self.num_anchors:, :, :]
        fg_probs = fg_probs.permute(0, 2, 3,
                                    1).contiguous().view(batch_size, -1)

        # sort fg scores descending so a top-k cut is a prefix slice
        _, fg_probs_order = torch.sort(fg_probs, dim=1, descending=True)

        # fg_probs_batch = torch.zeros(batch_size,
        # self.post_nms_topN).type_as(rpn_cls_probs)
        proposals_batch = torch.zeros(batch_size, self.post_nms_topN,
                                      4).type_as(rpn_bbox_preds)
        proposals_order = torch.zeros(
            batch_size, self.post_nms_topN).fill_(-1).type_as(fg_probs_order)

        for i in range(batch_size):
            proposals_single = proposals[i]
            fg_probs_single = fg_probs[i]
            fg_order_single = fg_probs_order[i]
            # pre nms: keep only the highest-scoring candidates
            if self.pre_nms_topN > 0:
                fg_order_single = fg_order_single[:self.pre_nms_topN]
            proposals_single = proposals_single[fg_order_single]
            fg_probs_single = fg_probs_single[fg_order_single]

            # nms expects (x1,y1,x2,y2,score) rows
            keep_idx_i = nms(
                torch.cat((proposals_single, fg_probs_single.unsqueeze(1)), 1),
                self.nms_thresh)
            keep_idx_i = keep_idx_i.long().view(-1)

            # post nms
            if self.post_nms_topN > 0:
                keep_idx_i = keep_idx_i[:self.post_nms_topN]
            proposals_single = proposals_single[keep_idx_i, :]
            fg_probs_single = fg_probs_single[keep_idx_i]
            fg_order_single = fg_order_single[keep_idx_i]

            # padding 0 at the end.
            num_proposal = keep_idx_i.numel()
            proposals_batch[i, :num_proposal, :] = proposals_single
            # fg_probs_batch[i, :num_proposal] = fg_probs_single
            proposals_order[i, :num_proposal] = fg_order_single
        return proposals_batch, proposals_order

    def forward(self, bottom_blobs):
        """RPN forward pass: score anchors, regress offsets, emit RoIs.

        Returns a dict with proposals, cls scores/probs reshaped to
        (N, H*W*num_anchors, 2), the rois (gt boxes appended when
        training), the anchors and the proposal order for tracking.
        """
        base_feat = bottom_blobs['base_feat']
        batch_size = base_feat.shape[0]
        gt_boxes = bottom_blobs['gt_boxes']
        im_info = bottom_blobs['im_info']

        # rpn conv trunk
        rpn_conv = F.relu(self.rpn_conv(base_feat), inplace=True)

        # rpn cls score
        # shape(N,2*num_anchors,H,W)
        rpn_cls_scores = self.rpn_cls_score(rpn_conv)

        # rpn cls prob shape(N,2*num_anchors,H,W)
        rpn_cls_score_reshape = rpn_cls_scores.view(batch_size, 2, -1)
        rpn_cls_probs = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_probs = rpn_cls_probs.view_as(rpn_cls_scores)
        # import ipdb
        # ipdb.set_trace()

        # rpn bbox pred
        # shape(N,4*num_anchors,H,W)
        if self.use_score:
            # shape (N,2,num_anchoros*H*W)
            # NOTE(review): after this permute rpn_cls_scores is 3-D, but it
            # is indexed with four indices below — this branch looks broken;
            # confirm whether use_score=True is ever exercised.
            rpn_cls_scores = rpn_cls_score_reshape.permute(0, 2, 1)
            rpn_bbox_preds = []
            for i in range(self.num_anchors):
                rpn_bbox_feat = torch.cat(
                    [rpn_conv, rpn_cls_scores[:, ::self.num_anchors, :, :]],
                    dim=1)
                rpn_bbox_preds.append(self.rpn_bbox_pred(rpn_bbox_feat))
            rpn_bbox_preds = torch.cat(rpn_bbox_preds, dim=1)
        else:
            # get rpn offsets to the anchor boxes
            rpn_bbox_preds = self.rpn_bbox_pred(rpn_conv)
            # rpn_bbox_preds = [rpn_bbox_preds]

        # generate anchors for the single feature map
        feature_map_list = [base_feat.size()[-2:]]
        anchors = self.anchor_generator.generate(feature_map_list)

        ###############################
        # Proposal
        ###############################
        # note that proposals_order is used for track transform of propsoals
        proposals_batch, proposals_order = self.generate_proposal(
            rpn_cls_probs, anchors, rpn_bbox_preds, im_info)
        # prepend a batch-index column to form (N, num_proposals, 5) rois
        batch_idx = torch.arange(batch_size).view(batch_size, 1).expand(
            -1, proposals_batch.shape[1]).type_as(proposals_batch)
        rois_batch = torch.cat((batch_idx.unsqueeze(-1), proposals_batch),
                               dim=2)

        if self.training:
            rois_batch = self.append_gt(rois_batch, gt_boxes)

        # reshape scores to (N, H*W*num_anchors, 2) for the loss
        rpn_cls_scores = rpn_cls_scores.view(batch_size, 2, -1,
                                             rpn_cls_scores.shape[2],
                                             rpn_cls_scores.shape[3])
        rpn_cls_scores = rpn_cls_scores.permute(0, 3, 4, 2,
                                                1).contiguous().view(
                                                    batch_size, -1, 2)

        # postprocess probs the same way
        rpn_cls_probs = rpn_cls_probs.view(batch_size, 2, -1,
                                           rpn_cls_probs.shape[2],
                                           rpn_cls_probs.shape[3])
        rpn_cls_probs = rpn_cls_probs.permute(0, 3, 4, 2, 1).contiguous().view(
            batch_size, -1, 2)
        predict_dict = {
            'proposals_batch': proposals_batch,
            'rpn_cls_scores': rpn_cls_scores,
            'rois_batch': rois_batch,
            'anchors': anchors,

            # used for loss
            'rpn_bbox_preds': rpn_bbox_preds,
            'rpn_cls_probs': rpn_cls_probs,
            'proposals_order': proposals_order,
        }

        return predict_dict

    def append_gt(self, rois_batch, gt_boxes):
        """Concatenate ground-truth boxes (as extra RoIs) onto ``rois_batch``.

        The gt coordinates go into columns 1:5 of a zero-filled
        (N, num_gt, 5) tensor, so the batch-index column stays 0.
        NOTE(review): a constant 0 batch index is only correct for
        batch size 1 — the original flagged "may be some bugs here".
        """
        num_batch = gt_boxes.shape[0]
        num_gt = gt_boxes.shape[1]
        padded_gt = torch.zeros(num_batch, num_gt, 5).type_as(gt_boxes)
        padded_gt[:, :, 1:5] = gt_boxes[:, :, :4]
        # append gt rois after the predicted proposals
        return torch.cat([rois_batch, padded_gt], dim=1)

    def loss(self, prediction_dict, feed_dict):
        """Assign anchor targets, subsample cls and reg sets, compute losses.

        Two separate subsamples are drawn: a classification sample guided
        by the predicted fg probability, and a regression sample over the
        positives; the reg mask is OR-ed with the cls mask.  Returns
        'rpn_cls_loss' and 'rpn_bbox_loss'.

        NOTE(review): the ``num_cls_coeff == 0`` / ``num_reg_coeff == 0``
        checks compare per-image count tensors with a scalar, which only
        works as a plain ``if`` for batch size 1 — confirm.
        """
        # loss for cls
        loss_dict = {}

        gt_boxes = feed_dict['gt_boxes']

        anchors = prediction_dict['anchors']

        assert len(anchors) == 1, 'just one feature maps is supported now'
        anchors = anchors[0]

        #################################
        # target assigner
        ################################
        # no need gt labels here,it just a binary classifcation problem
        #  import ipdb
        #  ipdb.set_trace()
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights = \
            self.target_assigner.assign(anchors, gt_boxes, gt_labels=None)

        ################################
        # double subsample
        ################################

        # fg probability is used as the hard-example criterion
        rpn_cls_probs = prediction_dict['rpn_cls_probs'][:, :, 1]
        cls_criterion = rpn_cls_probs
        # cls loss
        rpn_cls_score = prediction_dict['rpn_cls_scores']
        # rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score, rpn_cls_targets)
        rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score.view(-1, 2),
                                         rpn_cls_targets.view(-1))
        rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
        # cls_criterion = rpn_cls_loss
        # cls subsample
        # pos_indicator = rpn_cls_targets > 0
        # ignore fg/bg
        indicator = rpn_cls_weights > 0
        pos_indicator = indicator
        cls_batch_sampled_mask = self.sampler.subsample_batch(
            self.num_cls_samples,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        cls_batch_sampled_mask = cls_batch_sampled_mask.type_as(
            rpn_cls_weights)

        rpn_cls_weights = rpn_cls_weights * cls_batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)

        # reg subsample
        # subsample all from fg
        pos_indicator = rpn_reg_weights > 0
        rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        # reg_criterion = rpn_reg_loss.sum(dim=-1)

        reg_batch_sampled_mask = self.sampler.subsample_batch(
            self.num_reg_samples,
            pos_indicator,
            criterion=cls_criterion,
            indicator=None)
        # every cls-sampled anchor also contributes to regression
        reg_batch_sampled_mask |= cls_batch_sampled_mask.type_as(
            reg_batch_sampled_mask)
        reg_batch_sampled_mask = reg_batch_sampled_mask.type_as(
            rpn_cls_weights)
        rpn_reg_weights = rpn_reg_weights * reg_batch_sampled_mask
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # guard against empty samples instead of asserting
        #  assert num_cls_coeff, 'bug happens'
        #  assert num_reg_coeff, 'bug happens'
        if num_cls_coeff == 0:
            num_cls_coeff = torch.ones([]).type_as(num_cls_coeff)
        if num_reg_coeff == 0:
            num_reg_coeff = torch.ones([]).type_as(num_reg_coeff)

        # cls loss
        # rpn_cls_score = prediction_dict['rpn_cls_scores']
        # # rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score, rpn_cls_targets)
        # rpn_cls_loss = self.rpn_cls_loss(
        # rpn_cls_score.view(-1, 2), rpn_cls_targets.view(-1))
        rpn_cls_loss *= rpn_cls_weights
        rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()

        # bbox loss
        # shape(N,num,4)
        # rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        # rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # # shape(N,H*W*num_anchors,4)
        # rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        # rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(
            rpn_reg_loss.shape[0], -1).sum(dim=1) / num_reg_coeff.float()

        loss_dict['rpn_cls_loss'] = rpn_cls_loss
        loss_dict['rpn_bbox_loss'] = rpn_reg_loss
        return loss_dict
class GateFasterRCNN(Model):
    """Two-stage Faster R-CNN detector built on a gated RPN (GateRPNModel).

    Pipeline: backbone features -> RPN proposals -> RoI-align pooling ->
    second-stage features -> per-RoI classification and box-regression heads.
    Configuration is read in ``init_param``; sub-modules are built in
    ``init_modules`` and initialized in ``init_weights``.
    """

    def forward(self, feed_dict):
        """Run the full two-stage forward pass.

        Args:
            feed_dict: dict with at least 'img'; the RPN sub-model also reads
                'gt_boxes' and 'im_info' from it, and during training
                ``pre_subsample`` reads 'gt_boxes'/'gt_labels'.

        Returns:
            dict containing the RPN outputs plus 'rcnn_cls_probs',
            'rcnn_bbox_preds', 'rcnn_cls_scores' and 'second_rpn_anchors'.
        """
        # import ipdb
        # ipdb.set_trace()

        prediction_dict = {}

        # base model: first-stage backbone features shared by RPN and RCNN
        base_feat = self.feature_extractor.first_stage_feature(
            feed_dict['img'])
        feed_dict.update({'base_feat': base_feat})
        # batch_size = base_feat.shape[0]

        # rpn model: contributes 'rois_batch', 'anchors', 'proposals_order',
        # 'rpn_cls_scores', 'rpn_bbox_preds', ... to prediction_dict
        prediction_dict.update(self.rpn_model.forward(feed_dict))

        # proposals = prediction_dict['proposals_batch']
        # shape(N,num_proposals,5)
        # pre subsample for reduce consume of memory
        if self.training:
            self.pre_subsample(prediction_dict, feed_dict)
        rois_batch = prediction_dict['rois_batch']

        # note here base_feat (N,C,H,W),rois_batch (N,num_proposals,5)
        pooled_feat = self.rcnn_pooling(base_feat, rois_batch.view(-1, 5))

        # shape(N,C,1,1)
        pooled_feat = self.feature_extractor.second_stage_feature(pooled_feat)
        # shape(N,C): global average pool over the two spatial dims
        pooled_feat = pooled_feat.mean(3).mean(2)

        rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat)
        rcnn_cls_scores = self.rcnn_cls_pred(pooled_feat)

        rcnn_cls_probs = F.softmax(rcnn_cls_scores, dim=1)

        prediction_dict['rcnn_cls_probs'] = rcnn_cls_probs
        prediction_dict['rcnn_bbox_preds'] = rcnn_bbox_preds
        prediction_dict['rcnn_cls_scores'] = rcnn_cls_scores

        # used for track: map surviving proposals back to their anchors.
        # NOTE(review): during training pre_subsample shrinks rois_batch but
        # (see its trailing guard) never shrinks proposals_order, so these two
        # may disagree in length here — confirm intended behavior.
        proposals_order = prediction_dict['proposals_order']
        prediction_dict['second_rpn_anchors'] = prediction_dict['anchors'][0][
            proposals_order]

        return prediction_dict

    def init_weights(self):
        """Initialize sub-modules, then normal-init the two RCNN heads."""
        # submodule init weights
        self.feature_extractor.init_weights()
        self.rpn_model.init_weights()

        Filler.normal_init(self.rcnn_cls_pred, 0, 0.01, self.truncated)
        Filler.normal_init(self.rcnn_bbox_pred, 0, 0.001, self.truncated)

    def init_modules(self):
        """Build backbone, RPN, RoI pooling, RCNN heads and loss modules."""
        self.feature_extractor = FeatureExtractor(
            self.feature_extractor_config)
        self.rpn_model = GateRPNModel(self.rpn_config)
        # 1/16 spatial scale: RoIs are given in input-image coordinates
        self.rcnn_pooling = RoIAlignAvg(self.pooling_size, self.pooling_size,
                                        1.0 / 16.0)
        self.rcnn_cls_pred = nn.Linear(2048, self.n_classes)
        if self.class_agnostic:
            self.rcnn_bbox_pred = nn.Linear(2048, 4)
        else:
            self.rcnn_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        # loss module
        # reduce=False keeps per-element losses so per-sample weights can be
        # applied in loss()
        if self.use_focal_loss:
            self.rcnn_cls_loss = FocalLoss(2)
        else:
            self.rcnn_cls_loss = functools.partial(F.cross_entropy,
                                                   reduce=False)

        self.rcnn_bbox_loss = nn.modules.SmoothL1Loss(reduce=False)

    def init_param(self, model_config):
        """Read detector configuration and build target assigner/sampler.

        Args:
            model_config: dict of detector hyper-parameters; see the keys
                accessed below.
        """
        classes = model_config['classes']
        self.classes = classes
        self.n_classes = len(classes)
        self.class_agnostic = model_config['class_agnostic']
        self.pooling_size = model_config['pooling_size']
        self.pooling_mode = model_config['pooling_mode']
        self.crop_resize_with_max_pool = model_config[
            'crop_resize_with_max_pool']
        self.truncated = model_config['truncated']

        self.use_focal_loss = model_config['use_focal_loss']
        self.subsample_twice = model_config['subsample_twice']
        self.rcnn_batch_size = model_config['rcnn_batch_size']

        # some submodule config
        self.feature_extractor_config = model_config[
            'feature_extractor_config']
        self.rpn_config = model_config['rpn_config']

        # assigner
        self.target_assigner = TargetAssigner(
            model_config['target_assigner_config'])

        # sampler
        # self.sampler = HardNegativeSampler(model_config['sampler_config'])
        # self.sampler = BalancedSampler(model_config['sampler_config'])
        self.sampler = DetectionSampler(model_config['sampler_config'])

    def pre_subsample(self, prediction_dict, feed_dict):
        """Assign targets to RoIs and subsample them before RoI pooling.

        Mutates ``prediction_dict`` in place: adds the rcnn targets/weights
        (weights pre-normalized by their positive counts, so loss() only has
        to sum) and shrinks 'rois_batch' to the sampled subset. Called from
        forward() only when ``self.training`` is True.
        """
        rois_batch = prediction_dict['rois_batch']
        gt_boxes = feed_dict['gt_boxes']
        gt_labels = feed_dict['gt_labels']

        ##########################
        # assigner
        ##########################
        #  import ipdb
        #  ipdb.set_trace()
        rcnn_cls_targets, rcnn_reg_targets, rcnn_cls_weights, rcnn_reg_weights = self.target_assigner.assign(
            rois_batch[:, :, 1:], gt_boxes, gt_labels)

        ##########################
        # subsampler
        ##########################
        pos_indicator = rcnn_cls_targets > 0
        indicator = rcnn_cls_weights > 0

        # subsample from all
        # shape (N,M)
        # use overlaps to subsample
        use_iou_for_criteron = True
        if use_iou_for_criteron:
            cls_criterion = self.target_assigner.matcher.assigned_overlaps_batch
        else:
            cls_criterion = None
        batch_sampled_mask = self.sampler.subsample_batch(
            self.rcnn_batch_size,
            pos_indicator,
            indicator=indicator,
            criterion=cls_criterion)
        # boolean-mask indexing flattens (N,M) -> 1-D over sampled entries
        rcnn_cls_weights = rcnn_cls_weights[batch_sampled_mask]
        rcnn_reg_weights = rcnn_reg_weights[batch_sampled_mask]
        num_cls_coeff = rcnn_cls_weights.type(
            torch.cuda.ByteTensor).sum(dim=-1)
        num_reg_coeff = rcnn_reg_weights.type(
            torch.cuda.ByteTensor).sum(dim=-1)
        # check
        # NOTE(review): ByteTensor accumulation can overflow above 255
        # positives — confirm rcnn_batch_size keeps counts below that.
        assert num_cls_coeff, 'bug happens'
        assert num_reg_coeff, 'bug happens'

        # pre-divide the weights by the number of contributing samples so the
        # loss only needs a sum
        prediction_dict[
            'rcnn_cls_weights'] = rcnn_cls_weights / num_cls_coeff.float()
        prediction_dict[
            'rcnn_reg_weights'] = rcnn_reg_weights / num_reg_coeff.float()
        prediction_dict['rcnn_cls_targets'] = rcnn_cls_targets[
            batch_sampled_mask]
        prediction_dict['rcnn_reg_targets'] = rcnn_reg_targets[
            batch_sampled_mask]

        # update rois_batch
        # NOTE(review): the view assumes every image in the batch keeps the
        # same number of sampled RoIs — confirm the sampler guarantees this.
        prediction_dict['rois_batch'] = rois_batch[batch_sampled_mask].view(
            rois_batch.shape[0], -1, 5)

        # NOTE(review): this branch is dead — pre_subsample is only invoked
        # when self.training is True (see forward) — so proposals_order is
        # never subsampled; confirm whether the condition is inverted.
        if not self.training:
            # used for track
            proposals_order = prediction_dict['proposals_order']

            prediction_dict['proposals_order'] = proposals_order[
                batch_sampled_mask]

    def loss(self, prediction_dict, feed_dict):
        """
        assign proposals label and subsample from them
        Then calculate loss

        Returns a dict with the RPN losses plus 'rcnn_cls_loss',
        'rcnn_bbox_loss' and (for statistics) 'rcnn_cls_targets'. The
        weights were already normalized in pre_subsample, so a plain sum
        yields the mean over sampled RoIs.
        """
        loss_dict = {}

        # submodule loss
        loss_dict.update(self.rpn_model.loss(prediction_dict, feed_dict))

        # targets and weights
        rcnn_cls_weights = prediction_dict['rcnn_cls_weights']
        rcnn_reg_weights = prediction_dict['rcnn_reg_weights']

        rcnn_cls_targets = prediction_dict['rcnn_cls_targets']
        rcnn_reg_targets = prediction_dict['rcnn_reg_targets']

        # classification loss
        rcnn_cls_scores = prediction_dict['rcnn_cls_scores']
        rcnn_cls_loss = self.rcnn_cls_loss(rcnn_cls_scores, rcnn_cls_targets)
        rcnn_cls_loss *= rcnn_cls_weights
        rcnn_cls_loss = rcnn_cls_loss.sum(dim=-1)

        # bounding box regression L1 loss
        rcnn_bbox_preds = prediction_dict['rcnn_bbox_preds']
        rcnn_bbox_loss = self.rcnn_bbox_loss(rcnn_bbox_preds,
                                             rcnn_reg_targets).sum(dim=-1)
        rcnn_bbox_loss *= rcnn_reg_weights
        # rcnn_bbox_loss *= rcnn_reg_weights
        rcnn_bbox_loss = rcnn_bbox_loss.sum(dim=-1)

        # loss weights has no gradients
        loss_dict['rcnn_cls_loss'] = rcnn_cls_loss
        loss_dict['rcnn_bbox_loss'] = rcnn_bbox_loss

        # add rcnn_cls_targets to get the statics of rpn
        loss_dict['rcnn_cls_targets'] = rcnn_cls_targets

        return loss_dict
# Example #11
# 0
class RPNModel(Model):
    """Region Proposal Network head.

    Takes backbone features, predicts per-anchor objectness scores and box
    offsets, decodes them into proposals (via ``Proposal``), and computes the
    RPN classification/regression losses against assigned anchor targets.
    """

    def init_param(self, model_config):
        """Read RPN configuration and build sampler/anchors/target assigner.

        Args:
            model_config: dict of RPN hyper-parameters; see keys below.
        """
        self.in_channels = model_config['din']
        self.post_nms_topN = model_config['post_nms_topN']
        self.pre_nms_topN = model_config['pre_nms_topN']
        self.nms_thresh = model_config['nms_thresh']
        self.use_score = model_config['use_score']
        self.rpn_batch_size = model_config['rpn_batch_size']
        self.use_focal_loss = model_config['use_focal_loss']

        # sampler
        # self.sampler = HardNegativeSampler(model_config['sampler_config'])
        # self.sampler = BalancedSampler(model_config['sampler_config'])
        self.sampler = DetectionSampler(model_config['sampler_config'])

        # anchor generator
        self.anchor_generator = AnchorGenerator(
            model_config['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        # 4 box offsets and 2 (bg/fg) scores per anchor
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assigner
        self.target_assigner = TargetAssigner(
            model_config['target_assigner_config'])

        # bbox coder
        self.bbox_coder = self.target_assigner.bbox_coder

        # optional: use IoU overlaps (instead of fg prob) as the sampling
        # criterion in loss(); .get() so the key may be absent (None)
        self.use_iou = model_config.get('use_iou')

    def init_weights(self):
        """Normal-init the three RPN conv layers (std 0.01)."""
        self.truncated = False

        Filler.normal_init(self.rpn_conv, 0, 0.01, self.truncated)
        Filler.normal_init(self.rpn_cls_score, 0, 0.01, self.truncated)
        Filler.normal_init(self.rpn_bbox_pred, 0, 0.01, self.truncated)

    def init_modules(self):
        """Build the shared conv trunk, score/offset heads and loss modules."""
        # define the convrelu layers processing input feature map
        self.rpn_conv = nn.Conv2d(self.in_channels, 512, 3, 1, 1, bias=True)

        # define bg/fg classifcation score layer
        self.rpn_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # define anchor box offset prediction layer

        if self.use_score:
            # score features (2 channels) are concatenated to the trunk and
            # offsets are predicted per anchor (4 channels each)
            bbox_feat_channels = 512 + 2
            # BUGFIX: was `/=`, which yields a float under Python 3 and makes
            # nn.Conv2d reject the channel count; floor division keeps an int.
            self.nc_bbox_out //= self.num_anchors
        else:
            bbox_feat_channels = 512
        self.rpn_bbox_pred = nn.Conv2d(bbox_feat_channels, self.nc_bbox_out, 1,
                                       1, 0)

        # bbox
        # reduce=False keeps per-element losses so anchor weights apply
        self.rpn_bbox_loss = nn.modules.loss.SmoothL1Loss(reduce=False)

        # cls
        if self.use_focal_loss:
            self.rpn_cls_loss = FocalLoss(2)
        else:
            self.rpn_cls_loss = functools.partial(F.cross_entropy,
                                                  reduce=False)

    # def generate_proposal(self, rpn_cls_probs, anchors, rpn_bbox_preds,
    # im_info):
    # pass

    def forward(self, bottom_blobs):
        """Predict objectness/offsets and decode proposals.

        Args:
            bottom_blobs: dict with 'base_feat' (N,C,H,W), 'gt_boxes' and
                'im_info'.

        Returns:
            dict with 'rpn_cls_scores' (N,A*H*W,2), 'rois_batch',
            'anchors', 'rpn_bbox_preds', 'rpn_cls_probs' (N,A*H*W,2) and
            'proposals_order'.
        """
        base_feat = bottom_blobs['base_feat']
        batch_size = base_feat.shape[0]
        gt_boxes = bottom_blobs['gt_boxes']
        im_info = bottom_blobs['im_info']

        # rpn conv
        rpn_conv = F.relu(self.rpn_conv(base_feat), inplace=True)

        # rpn cls score
        # shape(N,2*num_anchors,H,W)
        rpn_cls_scores = self.rpn_cls_score(rpn_conv)

        # rpn cls prob shape(N,2*num_anchors,H,W)
        # softmax over the bg/fg pair for each anchor position
        rpn_cls_score_reshape = rpn_cls_scores.view(batch_size, 2, -1)
        rpn_cls_probs = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_probs = rpn_cls_probs.view_as(rpn_cls_scores)
        # import ipdb
        # ipdb.set_trace()

        # rpn bbox pred
        # shape(N,4*num_anchors,H,W)
        if self.use_score:
            # shape (N,2,num_anchoros*H*W)
            # NOTE(review): this branch looks broken — rpn_cls_scores is made
            # 3-D by the permute but is then indexed with four indices, and
            # the loop concatenates the same feature every iteration; confirm
            # before enabling use_score.
            rpn_cls_scores = rpn_cls_score_reshape.permute(0, 2, 1)
            rpn_bbox_preds = []
            for i in range(self.num_anchors):
                rpn_bbox_feat = torch.cat(
                    [rpn_conv, rpn_cls_scores[:, ::self.num_anchors, :, :]],
                    dim=1)
                rpn_bbox_preds.append(self.rpn_bbox_pred(rpn_bbox_feat))
            rpn_bbox_preds = torch.cat(rpn_bbox_preds, dim=1)
        else:
            # get rpn offsets to the anchor boxes
            rpn_bbox_preds = self.rpn_bbox_pred(rpn_conv)
            # rpn_bbox_preds = [rpn_bbox_preds]

        # generate anchors
        feature_map_list = [base_feat.size()[-2:]]
        anchors = self.anchor_generator.generate(feature_map_list)

        ###############################
        # Proposal
        ###############################
        # note that proposals_order is used for track transform of propsoals
        rois_batch, proposals_order = Proposal.apply(rpn_cls_probs, anchors,
                                                     rpn_bbox_preds, im_info)
        # batch_idx = torch.arange(batch_size).view(batch_size, 1).expand(
        # -1, proposals_batch.shape[1]).type_as(proposals_batch)
        # rois_batch = torch.cat((batch_idx.unsqueeze(-1), proposals_batch),
        # dim=2)

        if self.training:
            rois_batch = self.append_gt(rois_batch, gt_boxes)

        # reshape scores/probs from (N,2*A,H,W) to (N,H*W*A,2) so they line
        # up with the flattened anchor order used by the loss
        rpn_cls_scores = rpn_cls_scores.view(batch_size, 2, -1,
                                             rpn_cls_scores.shape[2],
                                             rpn_cls_scores.shape[3])
        rpn_cls_scores = rpn_cls_scores.permute(0, 3, 4, 2,
                                                1).contiguous().view(
                                                    batch_size, -1, 2)

        # postprocess
        rpn_cls_probs = rpn_cls_probs.view(batch_size, 2, -1,
                                           rpn_cls_probs.shape[2],
                                           rpn_cls_probs.shape[3])
        rpn_cls_probs = rpn_cls_probs.permute(0, 3, 4, 2, 1).contiguous().view(
            batch_size, -1, 2)
        predict_dict = {
            'rpn_cls_scores': rpn_cls_scores,
            'rois_batch': rois_batch,
            'anchors': anchors,

            # used for loss
            'rpn_bbox_preds': rpn_bbox_preds,
            'rpn_cls_probs': rpn_cls_probs,
            'proposals_order': proposals_order,
        }

        return predict_dict

    def append_gt(self, rois_batch, gt_boxes):
        """Append ground-truth boxes (as extra RoIs) to the proposal batch.

        Args:
            rois_batch: (N,num_proposals,5) proposals, column 0 is batch idx.
            gt_boxes: (N,num_gt,>=4) ground-truth boxes.

        Returns:
            (N,num_proposals+num_gt,5) concatenated RoIs; the appended rows
            keep batch index 0 in column 0.
        """
        ################################
        # append gt_boxes to rois_batch for losses
        ################################
        # may be some bugs here
        gt_boxes_append = torch.zeros(gt_boxes.shape[0], gt_boxes.shape[1],
                                      5).type_as(gt_boxes)
        gt_boxes_append[:, :, 1:5] = gt_boxes[:, :, :4]
        # cat gt_boxes to rois_batch
        rois_batch = torch.cat([rois_batch, gt_boxes_append], dim=1)
        return rois_batch

    def loss(self, prediction_dict, feed_dict):
        """Compute RPN cls/bbox losses over a subsampled anchor set.

        Returns:
            dict with 'rpn_cls_loss' and 'rpn_bbox_loss' (per-image values).
        """
        # loss for cls
        loss_dict = {}

        gt_boxes = feed_dict['gt_boxes']

        anchors = prediction_dict['anchors']

        assert len(anchors) == 1, 'just one feature maps is supported now'
        anchors = anchors[0]

        #################################
        # target assigner
        ################################
        # no need gt labels here,it just a binary classifcation problem
        #  import ipdb
        #  ipdb.set_trace()
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights = \
            self.target_assigner.assign(anchors, gt_boxes, gt_labels=None)

        ################################
        # subsample
        ################################

        pos_indicator = rpn_reg_weights > 0
        indicator = rpn_cls_weights > 0

        # criterion ranks anchors for hard-example selection: either the
        # assigned IoU overlaps or the predicted fg probability
        if self.use_iou:
            cls_criterion = self.target_assigner.matcher.assigned_overlaps_batch
        else:
            rpn_cls_probs = prediction_dict['rpn_cls_probs'][:, :, 1]
            cls_criterion = rpn_cls_probs

        batch_sampled_mask = self.sampler.subsample_batch(
            self.rpn_batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        batch_sampled_mask = batch_sampled_mask.type_as(rpn_cls_weights)
        rpn_cls_weights = rpn_cls_weights * batch_sampled_mask
        rpn_reg_weights = rpn_reg_weights * batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # check
        #  assert num_cls_coeff, 'bug happens'
        #  assert num_reg_coeff, 'bug happens'
        # NOTE(review): `tensor == 0` truth-testing assumes batch size 1 —
        # confirm this model is only run with N == 1.
        if num_cls_coeff == 0:
            num_cls_coeff = torch.ones([]).type_as(num_cls_coeff)
        if num_reg_coeff == 0:
            num_reg_coeff = torch.ones([]).type_as(num_reg_coeff)

        # cls loss
        rpn_cls_score = prediction_dict['rpn_cls_scores']
        # rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score, rpn_cls_targets)
        rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score.view(-1, 2),
                                         rpn_cls_targets.view(-1))
        rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
        rpn_cls_loss *= rpn_cls_weights
        rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()

        # bbox loss
        # shape(N,num,4)
        rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(
            rpn_reg_loss.shape[0], -1).sum(dim=1) / num_reg_coeff.float()

        loss_dict['rpn_cls_loss'] = rpn_cls_loss
        loss_dict['rpn_bbox_loss'] = rpn_reg_loss
        return loss_dict
class OrgOHEMThreeIoUSecondStageFasterRCNN(Model):
    """Faster R-CNN variant with OHEM-style second-stage classification.

    Uses separate second/third-stage feature branches for regression and
    classification, trains only the classification head here (the bbox loss
    code is commented out), and selects hard examples by per-RoI MSE loss.
    """

    def forward(self, feed_dict):
        """Run backbone -> RPN -> RoI pooling -> split cls/reg branches.

        Args:
            feed_dict: dict with at least 'img'; the RPN also reads
                'gt_boxes' and 'im_info'.

        Returns:
            dict with RPN outputs plus 'rcnn_cls_probs', 'rcnn_bbox_preds',
            'rcnn_cls_scores' and 'second_rpn_anchors'.
        """
        # import ipdb
        # ipdb.set_trace()

        prediction_dict = {}

        # base model: shared first-stage backbone features
        base_feat = self.feature_extractor.first_stage_feature(
            feed_dict['img'])
        feed_dict.update({'base_feat': base_feat})
        # batch_size = base_feat.shape[0]

        # rpn model
        prediction_dict.update(self.rpn_model.forward(feed_dict))

        # NOTE(review): unlike sibling models there is no pre_subsample here;
        # all RPN rois are pooled and subsampling happens inside loss().
        rois_batch = prediction_dict['rois_batch']

        # note here base_feat (N,C,H,W),rois_batch (N,num_proposals,5)
        pooled_feat = self.rcnn_pooling(base_feat, rois_batch.view(-1, 5))

        # shape(N,C,1,1)
        # regression branch: second-stage features
        pooled_feat_reg = self.feature_extractor.second_stage_feature(
            pooled_feat)
        ########################################
        # semantic map
        ########################################
        # no necessary for iou
        # pooled_feat_reg = pooled_feat_reg.mean(3).mean(2)

        # classification branch: third-stage features, globally average-pooled
        pooled_feat_cls = self.feature_extractor.third_stage_feature(
            pooled_feat)
        pooled_feat_cls = pooled_feat_cls.mean(3).mean(2)

        rcnn_cls_scores = self.rcnn_cls_pred(pooled_feat_cls)
        # rcnn_cls_scores = rcnn_cls_scores_map.mean(3).mean(2)
        # saliency_map = F.softmax(rcnn_cls_scores_map, dim=1)
        rcnn_cls_probs = F.softmax(rcnn_cls_scores, dim=1)

        pooled_feat_reg = pooled_feat_reg.mean(3).mean(2)
        rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat_reg)

        prediction_dict['rcnn_cls_probs'] = rcnn_cls_probs
        prediction_dict['rcnn_bbox_preds'] = rcnn_bbox_preds
        prediction_dict['rcnn_cls_scores'] = rcnn_cls_scores

        # used for track: map proposals back to the anchors they came from
        proposals_order = prediction_dict['proposals_order']
        prediction_dict['second_rpn_anchors'] = prediction_dict['anchors'][0][
            proposals_order]

        return prediction_dict

    def unfreeze_part_modules(self):
        """Re-enable gradients for the third-stage (classification) branch."""
        model = self.feature_extractor.third_stage_feature
        for param in model.parameters():
            param.requires_grad = True

    def init_weights(self):
        """Init sub-modules/heads, then freeze everything except the
        third-stage branch."""
        # submodule init weights
        self.feature_extractor.init_weights()
        self.rpn_model.init_weights()

        Filler.normal_init(self.rcnn_cls_pred, 0, 0.01, self.truncated)
        Filler.normal_init(self.rcnn_bbox_pred, 0, 0.001, self.truncated)

        self.freeze_modules()
        self.unfreeze_part_modules()

    def init_modules(self):
        """Build backbone, RPN, pooling, heads, losses and attention conv."""
        self.feature_extractor = ResNetFeatureExtractor(
            self.feature_extractor_config)
        self.rpn_model = RPNModel(self.rpn_config)
        # 1/16 spatial scale: RoIs are given in input-image coordinates
        self.rcnn_pooling = RoIAlignAvg(self.pooling_size, self.pooling_size,
                                        1.0 / 16.0)
        # self.rcnn_cls_pred = nn.Conv2d(2048, self.n_classes, 3, 1, 1)
        # import ipdb
        # ipdb.set_trace()
        self.rcnn_cls_pred = nn.Linear(2048, self.n_classes)
        #  self.rcnn_bbox_pred = nn.Conv2d(2048, 4, 3, 1, 1)
        # if self.class_agnostic:
        self.rcnn_bbox_pred = nn.Linear(2048, 4)
        # else:
        # self.rcnn_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        # loss module
        # if self.use_focal_loss:
        # self.rcnn_cls_loss = FocalLoss(2)
        # else:
        # self.rcnn_cls_loss = functools.partial(
        # F.cross_entropy, reduce=False)
        # MSE regression of the fg probability (see loss()); reduce=False
        # keeps per-RoI losses for the OHEM criterion
        self.rcnn_cls_loss = nn.MSELoss(reduce=False)

        self.rcnn_bbox_loss = nn.modules.SmoothL1Loss(reduce=False)

        # attention
        self.spatial_attention = nn.Conv2d(2048, 1, 3, 1, 1)

    def init_param(self, model_config):
        """Read configuration and build target assigner/sampler.

        Args:
            model_config: dict of detector hyper-parameters; see keys below.
        """
        classes = model_config['classes']
        self.classes = classes
        self.n_classes = len(classes)
        self.class_agnostic = model_config['class_agnostic']
        self.pooling_size = model_config['pooling_size']
        self.pooling_mode = model_config['pooling_mode']
        self.crop_resize_with_max_pool = model_config[
            'crop_resize_with_max_pool']
        self.truncated = model_config['truncated']
        self.use_self_attention = model_config.get('use_self_attention')

        self.use_focal_loss = model_config['use_focal_loss']
        self.subsample_twice = model_config['subsample_twice']
        self.rcnn_batch_size = model_config['rcnn_batch_size']

        # some submodule config
        self.feature_extractor_config = model_config[
            'feature_extractor_config']
        self.rpn_config = model_config['rpn_config']

        # assigner
        self.target_assigner = TargetAssigner(
            model_config['target_assigner_config'])

        # sampler
        # self.bbox_sampler = DetectionSampler({'fg_fraction': 0.5})
        # self.bbox_sampler = HardNegativeSampler({'fg_fraction': 1})
        # self.bbox_sampler = BalancedSampler({'fg_fraction': 1})
        # self.iou_sampler = BalancedSampler(model_config['sampler_config'])
        self.sampler = DetectionSampler({'fg_fraction': 1})

    def loss(self, prediction_dict, feed_dict):
        """
        assign proposals label and subsample from them
        Then calculate loss

        OHEM variant: per-RoI MSE loss of the fg probability is used as the
        sampling criterion, and only the classification loss is returned
        (the bbox-loss path is commented out). Also records AP statistics
        via the target assigner's analyzer.
        """
        # import ipdb
        # ipdb.set_trace()
        loss_dict = {}

        # submodule loss
        loss_dict.update(self.rpn_model.loss(prediction_dict, feed_dict))

        rois_batch = prediction_dict['rois_batch']
        gt_boxes = feed_dict['gt_boxes']
        gt_labels = feed_dict['gt_labels']

        ##########################
        # assigner
        ##########################
        rcnn_cls_targets, rcnn_reg_targets, rcnn_cls_weights, rcnn_reg_weights = self.target_assigner.assign(
            rois_batch[:, :, 1:], gt_boxes, gt_labels)

        # bounding box regression L1 loss
        #  rcnn_bbox_preds = prediction_dict['rcnn_bbox_preds']
        #  rcnn_bbox_loss = self.rcnn_bbox_loss(rcnn_bbox_preds,
        #  rcnn_reg_targets[0]).sum(dim=-1)
        #  reg_criterion = rcnn_reg_weights * rcnn_bbox_loss
        # reg_criterion = self.target_assigner.matcher.assigned_overlaps_batch

        # bbox subsample
        #  pos_indicator = rcnn_reg_weights > 0
        #  bbox_batch_sampled_mask = self.sampler.subsample_batch(
        #  self.rcnn_batch_size, pos_indicator, criterion=reg_criterion)

        #  rcnn_reg_weights *= bbox_batch_sampled_mask.type_as(rcnn_reg_weights)
        #  num_reg_coeff = (rcnn_reg_weights > 0).sum(dim=-1)
        #  assert num_reg_coeff, 'bug happens'
        #  rcnn_bbox_loss *= rcnn_reg_weights[0]
        #  rcnn_bbox_loss = rcnn_bbox_loss.sum(dim=-1) / num_reg_coeff.float()

        # classification loss
        # NOTE(review): despite the name, these are fg probabilities (column
        # 1 of the softmax output), regressed against the soft targets with
        # MSE — confirm rcnn_cls_targets holds IoU-style soft labels here.
        rcnn_cls_scores = prediction_dict['rcnn_cls_probs'][:, 1]
        rcnn_cls_loss = self.rcnn_cls_loss(rcnn_cls_scores,
                                           rcnn_cls_targets[0])

        # cls subsample: hardest examples ranked by weighted per-RoI loss
        cls_criterion = rcnn_cls_loss * rcnn_cls_weights
        indicator = rcnn_cls_weights > 0
        pos_indicator = indicator
        cls_batch_sampled_mask = self.sampler.subsample_batch(
            self.rcnn_batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)

        #  cls_batch_sampled_mask |= rcnn_reg_weights.type_as(
        #  cls_batch_sampled_mask)
        rcnn_cls_weights *= cls_batch_sampled_mask.type_as(rcnn_cls_weights)
        num_cls_coeff = (rcnn_cls_weights > 0).sum(dim=-1)
        # NOTE(review): truth-testing a tensor and the `[0]` indexing below
        # assume batch size 1 — confirm this model is only run with N == 1.
        assert num_cls_coeff, 'bug happens'
        rcnn_cls_loss *= rcnn_cls_weights[0]
        rcnn_cls_loss = rcnn_cls_loss.sum(dim=-1) / num_cls_coeff.float()

        # loss weights has no gradients
        loss_dict['rcnn_cls_loss'] = rcnn_cls_loss
        #  loss_dict['rcnn_bbox_loss'] = rcnn_bbox_loss

        # analysis precision
        #  import ipdb
        #  ipdb.set_trace()
        rcnn_cls_probs = prediction_dict['rcnn_cls_probs']
        fake_match = self.target_assigner.analyzer.match
        num_gt = feed_dict['gt_labels'].numel()
        self.target_assigner.analyzer.analyze_ap(fake_match,
                                                 rcnn_cls_probs[:, 1],
                                                 num_gt,
                                                 thresh=0.5)
        prediction_dict['rcnn_reg_weights'] = rcnn_reg_weights

        return loss_dict
# Example #13
# 0
class LEDRPNModel(Model):
    """RPN variant that, in addition to the usual objectness scores and box
    deltas, predicts IoU / IoG / IoD quality maps ("LED" heads) and fuses
    them into the foreground probability used to rank proposals.

    Each IoX head is a pair of 1x1 convs: a coarse 4-bin classification map
    and a fine regression map, combined by ``self.iox_bbox_coder``.
    """

    def init_param(self, model_config):
        """Read hyper-parameters and build sampler / anchor generator /
        target assigner from ``model_config``."""
        self.in_channels = model_config['din']
        self.post_nms_topN = model_config['post_nms_topN']
        self.pre_nms_topN = model_config['pre_nms_topN']
        self.nms_thresh = model_config['nms_thresh']
        self.use_score = model_config['use_score']
        self.rpn_batch_size = model_config['rpn_batch_size']
        self.use_focal_loss = model_config['use_focal_loss']
        # alpha: weight of the directly-predicted IoU vs. the IoU derived
        # from IoG/IoD; theta: temperature of the exp(-(1-iou)^2/theta) gate.
        self.alpha = 0.6
        self.theta = 1.0
        self.iox_bbox_coder = DiscreteBBoxCoder(
            model_config['iox_coder_config'])

        self.use_sharpL2 = model_config.get('use_sharpL2')
        self.use_sigmoid = model_config['use_sigmoid']
        self.use_cls_pred = model_config['use_cls_pred']

        # sampler
        # self.sampler = HardNegativeSampler(model_config['sampler_config'])
        # self.sampler = BalancedSampler(model_config['sampler_config'])
        self.sampler = DetectionSampler(model_config['sampler_config'])

        # anchor generator
        self.anchor_generator = AnchorGenerator(
            model_config['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assigner
        self.target_assigner = LEDTargetAssigner(
            model_config['target_assigner_config'])

        # bbox coder
        self.bbox_coder = self.target_assigner.bbox_coder

    def iox_clip(self, iox):
        """Return a copy of *iox* clamped to the valid ratio range [0, 1]."""
        iox = iox.clone()
        iox[iox < 0] = 0
        iox[iox > 1] = 1
        return iox

    def init_weights(self):
        """Gaussian-initialize all conv layers (IoX heads use a smaller std)."""
        self.truncated = False

        Filler.normal_init(self.rpn_conv, 0, 0.01, self.truncated)
        Filler.normal_init(self.rpn_cls_score, 0, 0.01, self.truncated)
        Filler.normal_init(self.rpn_bbox_pred, 0, 0.01, self.truncated)

        Filler.normal_init(self.rpn_coarse_map_conv_iod, 0, 0.001,
                           self.truncated)
        Filler.normal_init(self.rpn_fine_map_conv_iod, 0, 0.001,
                           self.truncated)

        Filler.normal_init(self.rpn_coarse_map_conv_iou, 0, 0.001,
                           self.truncated)

        Filler.normal_init(self.rpn_fine_map_conv_iou, 0, 0.001,
                           self.truncated)
        Filler.normal_init(self.rpn_fine_map_conv_iog, 0, 0.001,
                           self.truncated)
        Filler.normal_init(self.rpn_coarse_map_conv_iog, 0, 0.001,
                           self.truncated)

    def unfreeze_modules(self):
        """Re-enable gradients for all IoX head parameters only."""
        unfreeze_modules = [
            self.rpn_coarse_map_conv_iod.bias, self.rpn_fine_map_conv_iod.bias,
            self.rpn_coarse_map_conv_iog.bias, self.rpn_fine_map_conv_iog.bias,
            self.rpn_coarse_map_conv_iou.bias, self.rpn_fine_map_conv_iou.bias,
            self.rpn_coarse_map_conv_iod.weight,
            self.rpn_fine_map_conv_iod.weight,
            self.rpn_coarse_map_conv_iog.weight,
            self.rpn_fine_map_conv_iog.weight,
            self.rpn_coarse_map_conv_iou.weight,
            self.rpn_fine_map_conv_iou.weight
        ]
        for module in unfreeze_modules:
            module.requires_grad = True

    def init_modules(self):
        """Build conv heads and loss functions."""
        # define the convrelu layers processing input feature map
        self.rpn_conv = nn.Conv2d(self.in_channels, 512, 3, 1, 1, bias=True)

        # define bg/fg classifcation score layer
        self.rpn_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)
        # IoX heads: each produces 4 channels per anchor (4-bin coarse
        # classification in the coarse map, per-bin residual in the fine map).
        self.rpn_coarse_map_conv_iou = nn.Conv2d(512, 4 * self.num_anchors, 1,
                                                 1, 0)
        self.rpn_fine_map_conv_iou = nn.Conv2d(512, 4 * self.num_anchors, 1, 1,
                                               0)

        self.rpn_coarse_map_conv_iog = nn.Conv2d(512, 4 * self.num_anchors, 1,
                                                 1, 0)
        self.rpn_fine_map_conv_iog = nn.Conv2d(512, 4 * self.num_anchors, 1, 1,
                                               0)

        self.rpn_coarse_map_conv_iod = nn.Conv2d(512, 4 * self.num_anchors, 1,
                                                 1, 0)
        self.rpn_fine_map_conv_iod = nn.Conv2d(512, 4 * self.num_anchors, 1, 1,
                                               0)

        # define anchor box offset prediction layer

        if self.use_score:
            bbox_feat_channels = 512 + 2
            # BUGFIX: use floor division — under Python 3, ``/=`` yields a
            # float, and nn.Conv2d below requires an int channel count.
            self.nc_bbox_out //= self.num_anchors
        else:
            bbox_feat_channels = 512
        self.rpn_bbox_pred = nn.Conv2d(bbox_feat_channels, self.nc_bbox_out, 1,
                                       1, 0)

        # rpn bbox
        self.rpn_bbox_loss = nn.modules.loss.SmoothL1Loss(reduce=False)

        # IoX regression loss (per-element, reduced manually in loss())
        if self.use_sharpL2:
            self.reg_loss = SharpL2Loss()
        else:
            self.reg_loss = nn.MSELoss(reduce=False)
        self.cls_loss = nn.CrossEntropyLoss(reduce=False)

        # rpn cls
        if self.use_focal_loss:
            self.rpn_cls_loss = FocalLoss(2)
        else:
            self.rpn_cls_loss = functools.partial(F.cross_entropy,
                                                  reduce=False)

    def generate_proposal(self, rpn_cls_probs, anchors, rpn_bbox_preds,
                          im_info):
        # TODO create a new Function
        """Decode anchors with predicted deltas, then clip / sort / NMS.

        Args:
        rpn_cls_probs: FloatTensor,shape(N,2*num_anchors,H,W)
        rpn_bbox_preds: FloatTensor,shape(N,num_anchors*4,H,W)
        anchors: FloatTensor,shape(N,4,H,W)

        Returns:
        proposals_batch: FloatTensor, shape(N,post_nms_topN,4)
        proposals_order: LongTensor, shape(N,post_nms_topN), indices of the
            kept proposals in the original (pre-sort) anchor ordering,
            padded with -1/0 at the end.
        """
        # assert len(
        # rpn_bbox_preds) == 1, 'just one feature maps is supported now'
        # rpn_bbox_preds = rpn_bbox_preds[0]
        anchors = anchors[0]
        # do not backward
        # NOTE(review): this assignment is a no-op; the comment suggests a
        # ``detach()`` was intended — anchors presumably carry no grad anyway.
        anchors = anchors
        rpn_fg_cls_probs = rpn_cls_probs.detach()
        rpn_bbox_preds = rpn_bbox_preds.detach()

        batch_size = rpn_bbox_preds.shape[0]
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(batch_size, -1, 4)
        # apply deltas to anchors to decode
        proposals = self.bbox_coder.decode_batch(rpn_bbox_preds, anchors)

        # filer and clip
        proposals = box_ops.clip_boxes(proposals, im_info)

        # fg prob (already fused with IoU quality in forward())
        fg_probs = rpn_fg_cls_probs

        # sort fg
        _, fg_probs_order = torch.sort(fg_probs, dim=1, descending=True)

        proposals_batch = torch.zeros(batch_size, self.post_nms_topN,
                                      4).type_as(rpn_bbox_preds)
        proposals_order = torch.zeros(
            batch_size, self.post_nms_topN).fill_(-1).type_as(fg_probs_order)

        for i in range(batch_size):
            proposals_single = proposals[i]
            fg_probs_single = fg_probs[i]
            fg_order_single = fg_probs_order[i]
            # pre nms
            if self.pre_nms_topN > 0:
                fg_order_single = fg_order_single[:self.pre_nms_topN]
            proposals_single = proposals_single[fg_order_single]
            fg_probs_single = fg_probs_single[fg_order_single]

            # nms
            keep_idx_i = nms(
                torch.cat((proposals_single, fg_probs_single.unsqueeze(1)), 1),
                self.nms_thresh)
            keep_idx_i = keep_idx_i.long().view(-1)

            # post nms
            if self.post_nms_topN > 0:
                keep_idx_i = keep_idx_i[:self.post_nms_topN]
            proposals_single = proposals_single[keep_idx_i, :]
            fg_probs_single = fg_probs_single[keep_idx_i]
            fg_order_single = fg_order_single[keep_idx_i]

            # padding 0 at the end.
            num_proposal = keep_idx_i.numel()
            proposals_batch[i, :num_proposal, :] = proposals_single
            proposals_order[i, :num_proposal] = fg_order_single
        return proposals_batch, proposals_order

    def iou_pred(self, rpn_conv):
        """IoU head: see ``iox_pred``."""
        return self.iox_pred(rpn_conv, self.rpn_coarse_map_conv_iou,
                             self.rpn_fine_map_conv_iou)

    def iog_pred(self, rpn_conv):
        """IoG head: see ``iox_pred``."""
        return self.iox_pred(rpn_conv, self.rpn_coarse_map_conv_iog,
                             self.rpn_fine_map_conv_iog)

    def iod_pred(self, rpn_conv):
        """IoD head: see ``iox_pred``."""
        return self.iox_pred(rpn_conv, self.rpn_coarse_map_conv_iod,
                             self.rpn_fine_map_conv_iod)

    def iox_pred(self, rpn_conv, rpn_coarse_map_conv, rpn_fine_map_conv):
        """Run one IoX head.

        Returns a 3-tuple:
            decoded_iou: shape(N, H*W*num_anchors, 4) decoded ratio values,
            iou_cls_scores: raw coarse logits (for the cls loss),
            iou_reg: fine residuals (for the reg loss).
        """
        batch_size = rpn_conv.shape[0]
        coarse_map = rpn_coarse_map_conv(rpn_conv)
        fine_map = rpn_fine_map_conv(rpn_conv)

        # softmax over the 4 coarse bins
        coarse_map_reshape = coarse_map.view(batch_size, 4, -1)
        iou_level_probs = F.softmax(coarse_map_reshape, dim=1)
        iou_level_probs = iou_level_probs.view_as(coarse_map)
        if self.use_sigmoid:
            # normalize residual into (-1, 1)
            iou_reg = 2 * F.sigmoid(fine_map) - 1
        else:
            iou_reg = fine_map
        # reshape preprocess: (N,4,A,HW) -> (N, HW*A, 4)
        iou_reg = iou_reg.view(batch_size, 4, self.num_anchors, -1).permute(
            0, 3, 2, 1).contiguous().view(batch_size, -1, 4)
        iou_cls = iou_level_probs.view(batch_size, 4, self.num_anchors,
                                       -1).permute(0, 3, 2,
                                                   1).contiguous().view(
                                                       batch_size, -1, 4)
        decoded_iou = self.iox_bbox_coder.decode_batch(iou_cls, iou_reg)

        # used for cls and reg loss
        iou_cls_scores = coarse_map.view(batch_size, 4, self.num_anchors,
                                         -1).permute(0, 3, 2,
                                                     1).contiguous().view(
                                                         batch_size, -1, 4)
        return decoded_iou, iou_cls_scores, iou_reg

    def calculate_iou(self, iog, iod):
        """Derive IoU from IoG and IoD: iou = iod*iog / (iod + iog - iod*iog).

        Entries where iod == 0 are left at 0 to avoid division by zero.
        """
        mask = ~(iod == 0)
        iou_indirect = torch.zeros_like(iog)
        iod = iod[mask]
        iog = iog[mask]
        iou_indirect[mask] = (iod * iog) / (iod + iog - iod * iog)
        return iou_indirect

    def forward(self, bottom_blobs):
        """Compute cls/bbox/IoX predictions and generate proposals."""
        base_feat = bottom_blobs['base_feat']
        batch_size = base_feat.shape[0]
        gt_boxes = bottom_blobs['gt_boxes']
        # im_info = bottom_blobs['im_info']
        im_info = bottom_blobs['input_size']

        # rpn conv
        rpn_conv = F.relu(self.rpn_conv(base_feat), inplace=True)

        # rpn cls: softmax over fg/bg, then reshape to (N, HW*A, 2)
        rpn_cls_scores = self.rpn_cls_score(rpn_conv)
        rpn_cls_score_reshape = rpn_cls_scores.view(batch_size, 2, -1)
        rpn_cls_probs = F.softmax(rpn_cls_score_reshape,
                                  dim=1).view_as(rpn_cls_scores)
        rpn_cls_probs = rpn_cls_probs.view(batch_size, 2, self.num_anchors,
                                           -1).permute(0, 3, 2,
                                                       1).contiguous().view(
                                                           batch_size, -1, 2)
        rpn_cls_scores = rpn_cls_scores.view(batch_size, 2, self.num_anchors,
                                             -1).permute(0, 3, 2,
                                                         1).contiguous().view(
                                                             batch_size, -1, 2)

        iou, iou_scores, iou_reg = self.iou_pred(rpn_conv)
        iog, iog_scores, iog_reg = self.iog_pred(rpn_conv)
        iod, iod_scores, iod_reg = self.iod_pred(rpn_conv)

        # clamp decoded ratios to [0,1] before fusing
        iou = self.iox_clip(iou)
        iog = self.iox_clip(iog)
        iod = self.iox_clip(iod)

        # blend direct IoU prediction with the IoG/IoD-derived one
        iou_indirect = self.calculate_iou(iog, iod)
        iou_final = (1 - self.alpha) * iou_indirect + self.alpha * iou

        # gate the fg probability by predicted localization quality
        rpn_fg_probs_final = rpn_cls_probs[:, :, 1] * torch.exp(-torch.pow(
            (1 - iou_final), 2) / self.theta)

        # rpn bbox pred: get rpn offsets to the anchor boxes
        rpn_bbox_preds = self.rpn_bbox_pred(rpn_conv)

        # generate anchors
        feature_map_list = [base_feat.size()[-2:]]
        anchors = self.anchor_generator.generate(feature_map_list)

        ###############################
        # Proposal
        ###############################
        # note that proposals_order is used for tracking transform of propsoals
        proposals_batch, proposals_order = self.generate_proposal(
            rpn_fg_probs_final, anchors, rpn_bbox_preds, im_info)
        batch_idx = torch.arange(batch_size).view(batch_size, 1).expand(
            -1, proposals_batch.shape[1]).type_as(proposals_batch)
        rois_batch = torch.cat((batch_idx.unsqueeze(-1), proposals_batch),
                               dim=2)

        if self.training:
            rois_batch = self.append_gt(rois_batch, gt_boxes)

        predict_dict = {
            'proposals_batch': proposals_batch,
            # used for sorting
            'rpn_iou_final': rpn_fg_probs_final,
            'rois_batch': rois_batch,
            'anchors': anchors,

            # used for loss
            'rpn_bbox_preds': rpn_bbox_preds,
            'proposals_order': proposals_order,
            # reg
            'rpn_iou_reg': iou_reg,
            'rpn_iog_reg': iog_reg,
            'rpn_iod_reg': iod_reg,
            # cls
            'rpn_iou_scores': iou_scores,
            'rpn_iog_scores': iog_scores,
            'rpn_iod_scores': iod_scores,
            'rpn_cls_scores': rpn_cls_scores
        }

        return predict_dict

    def append_gt(self, rois_batch, gt_boxes):
        """Append ground-truth boxes (as rois with batch idx 0) to rois_batch
        so the downstream loss always sees positive examples."""
        gt_boxes_append = torch.zeros(gt_boxes.shape[0], gt_boxes.shape[1],
                                      5).type_as(gt_boxes)
        gt_boxes_append[:, :, 1:5] = gt_boxes[:, :, :4]
        # cat gt_boxes to rois_batch
        rois_batch = torch.cat([rois_batch, gt_boxes_append], dim=1)
        return rois_batch

    def loss(self, prediction_dict, feed_dict):
        """Compute cls / bbox / IoX-head losses after target assignment and
        subsampling. Returns a dict of scalar loss tensors."""
        loss_dict = {}
        gt_boxes = feed_dict['gt_boxes']
        anchors = prediction_dict['anchors']
        assert len(anchors) == 1, 'just one feature maps is supported now'
        anchors = anchors[0]

        #################################
        # target assigner
        ################################
        # no need gt labels here,it just a binary classifcation problem
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights = \
            self.target_assigner.assign(anchors,
                                        gt_boxes,
                                        gt_labels=None,
                                        input_size=feed_dict['input_size'])

        ################################
        # subsample
        ################################
        rpn_cls_probs = prediction_dict['rpn_iou_final']
        cls_criterion = rpn_cls_probs
        pos_indicator = rpn_reg_weights > 0
        indicator = rpn_cls_weights > 0

        batch_sampled_mask = self.sampler.subsample_batch(
            self.rpn_batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        batch_sampled_mask = batch_sampled_mask.type_as(rpn_cls_weights)
        rpn_cls_weights = rpn_cls_weights * batch_sampled_mask
        rpn_reg_weights = rpn_reg_weights * batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # guard against empty samples (avoid division by zero below)
        # NOTE(review): the ``== 0`` truth-test on a tensor only works when
        # batch size is 1 — confirm multi-batch training is unsupported here.
        if num_cls_coeff == 0:
            num_cls_coeff = torch.ones([]).type_as(num_cls_coeff)
        if num_reg_coeff == 0:
            num_reg_coeff = torch.ones([]).type_as(num_reg_coeff)

        # iou loss
        iou_scores = prediction_dict['rpn_iou_scores']
        iou = prediction_dict['rpn_iou_reg']
        iou_reg_targets = self.target_assigner.matcher.assigned_overlaps_batch
        iou_reg_targets_encode = self.iox_bbox_coder.encode_reg(
            iou_reg_targets)
        iou_reg_loss = self.reg_loss(iou, iou_reg_targets_encode).sum(dim=-1)

        iou_scores_targets = self.iox_bbox_coder.encode_cls(iou_reg_targets)
        iou_cls_loss = self.cls_loss(iou_scores.view(-1, 4),
                                     iou_scores_targets.view(-1))
        iou_cls_loss = iou_cls_loss.view_as(rpn_cls_weights)
        iou_cls_loss = iou_cls_loss.mean(dim=1)
        iou_reg_loss = iou_reg_loss.mean(dim=1)

        # iog loss
        iog_scores = prediction_dict['rpn_iog_scores']
        iog = prediction_dict['rpn_iog_reg']
        iog_reg_targets = self.target_assigner.matcher.assigned_iog_batch
        iog_reg_targets_encode = self.iox_bbox_coder.encode_reg(
            iog_reg_targets)
        iog_reg_loss = self.reg_loss(iog, iog_reg_targets_encode).sum(dim=-1)

        iog_scores_targets = self.iox_bbox_coder.encode_cls(iog_reg_targets)
        iog_cls_loss = self.cls_loss(iog_scores.view(-1, 4),
                                     iog_scores_targets.view(-1))
        iog_cls_loss = iog_cls_loss.view_as(rpn_cls_weights)
        iog_cls_loss = iog_cls_loss.mean(dim=1)
        iog_reg_loss = iog_reg_loss.mean(dim=1)

        # iod loss
        iod_scores = prediction_dict['rpn_iod_scores']
        iod = prediction_dict['rpn_iod_reg']
        iod_reg_targets = self.target_assigner.matcher.assigned_iod_batch
        iod_reg_targets_encode = self.iox_bbox_coder.encode_reg(
            iod_reg_targets)
        iod_reg_loss = self.reg_loss(iod, iod_reg_targets_encode).sum(dim=-1)

        iod_scores_targets = self.iox_bbox_coder.encode_cls(iod_reg_targets)
        iod_cls_loss = self.cls_loss(iod_scores.view(-1, 4),
                                     iod_scores_targets.view(-1))
        iod_cls_loss = iod_cls_loss.view_as(rpn_cls_weights)
        iod_cls_loss = iod_cls_loss.mean(dim=1)
        iod_reg_loss = iod_reg_loss.mean(dim=1)

        # cls loss
        if self.use_cls_pred:
            rpn_cls_score = prediction_dict['rpn_cls_scores']
            rpn_cls_loss = self.rpn_cls_loss(rpn_cls_score.view(-1, 2),
                                             rpn_cls_targets.view(-1))
            rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
            rpn_cls_loss *= rpn_cls_weights
            rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()
            loss_dict['rpn/cls_loss'] = rpn_cls_loss
        loss_dict['rpn/iou_cls_loss'] = iou_cls_loss
        loss_dict['rpn/iou_reg_loss'] = iou_reg_loss
        loss_dict['rpn/iog_cls_loss'] = iog_cls_loss
        loss_dict['rpn/iog_reg_loss'] = iog_reg_loss
        loss_dict['rpn/iod_reg_loss'] = iod_reg_loss
        loss_dict['rpn/iod_cls_loss'] = iod_cls_loss

        # bbox loss
        # shape(N,num,4)
        rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(
            rpn_reg_loss.shape[0], -1).sum(dim=1) / num_reg_coeff.float()

        loss_dict['rpn/bbox_loss'] = rpn_reg_loss
        return loss_dict
# Example #14
# 0
class SSDModel(Model):
    """Single-Shot Detector built on a pyramid VGG feature extractor.

    Multibox loc/conf heads are attached to several source feature maps;
    anchors are generated per feature-map shape and losses are computed with
    hard-example subsampling via ``DetectionSampler``.
    """

    def init_param(self, model_config):
        """Read hyper-parameters and build sampler / assigner / anchor gen."""
        self.feature_extractor_config = model_config['feature_extractor_config']
        # boxes (anchors) per location for each of the 6 source maps
        self.multibox_cfg = [3, 3, 3, 3, 3, 3]
        self.n_classes = len(model_config['classes'])
        self.sampler = DetectionSampler(model_config['sampler_config'])
        self.batch_size = model_config['batch_size']
        self.use_focal_loss = model_config['use_focal_loss']
        # self.multibox_cfg = model_config['multibox_config']

        self.target_assigner = TargetAssigner(
            model_config['target_assigner_config'])

        self.anchor_generator = AnchorGenerator(
            model_config['anchor_generator_config'])

        self.bbox_coder = self.target_assigner.bbox_coder

        # self.priorsbox = PriorBox(model_config['anchor_generator_config'])

    def init_modules(self):
        """Build feature extractor, multibox heads and loss layers."""
        self.feature_extractor = PyramidVggnetExtractor(
            self.feature_extractor_config)

        # loc layers and conf layers
        base_feat = self.feature_extractor.base_feat
        extra_layers = self.feature_extractor.extras_layers
        loc_layers, conf_layers = self.make_multibox(base_feat, extra_layers)
        self.loc_layers = loc_layers
        self.conf_layers = conf_layers

        # loss layers (per-element; reduced manually in loss())
        self.loc_loss = nn.SmoothL1Loss(reduce=False)

        if self.use_focal_loss:
            self.conf_loss = FocalLoss(
                self.n_classes, alpha=0.2, gamma=2, auto_alpha=False)
        else:
            self.conf_loss = nn.CrossEntropyLoss(reduce=False)

    def make_multibox(self, vgg, extra_layers):
        """Create loc (4 coords) and conf (n_classes scores) conv heads for
        each source layer; returns (loc_layers, conf_layers) ModuleLists."""
        cfg = self.multibox_cfg
        num_classes = self.n_classes
        loc_layers = []
        conf_layers = []
        # indices of the VGG layers used as detection sources
        vgg_source = [21, -2]
        for k, v in enumerate(vgg_source):
            loc_layers += [
                nn.Conv2d(
                    vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)
            ]
            conf_layers += [
                nn.Conv2d(
                    vgg[v].out_channels,
                    cfg[k] * num_classes,
                    kernel_size=3,
                    padding=1)
            ]
        # every second extra layer is a detection source
        for k, v in enumerate(extra_layers[1::2], 2):
            loc_layers += [
                nn.Conv2d(
                    v.out_channels, cfg[k] * 4, kernel_size=3, padding=1)
            ]
            conf_layers += [
                nn.Conv2d(
                    v.out_channels,
                    cfg[k] * num_classes,
                    kernel_size=3,
                    padding=1)
            ]
        return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)

    def init_weights(self):
        pass

    def forward(self, feed_dict):
        """Run the extractor and multibox heads; return predictions dict."""
        img = feed_dict['img']
        source_feats = self.feature_extractor(img)
        loc_preds = []
        conf_preds = []

        featmap_shapes = []

        # apply multibox head to source layers
        for (x, l, c) in zip(source_feats, self.loc_layers, self.conf_layers):
            loc_preds.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf_preds.append(c(x).permute(0, 2, 3, 1).contiguous())
            featmap_shapes.append(x.size()[-2:])

        # flatten per-map predictions and concat across maps
        loc_preds = torch.cat([o.view(o.size(0), -1) for o in loc_preds], 1)
        conf_preds = torch.cat([o.view(o.size(0), -1) for o in conf_preds], 1)
        probs = F.softmax(
            conf_preds.view(conf_preds.size(0), -1, self.n_classes), dim=-1)
        loc_preds = loc_preds.view(loc_preds.size(0), -1, 4)

        anchors = self.anchor_generator.generate_pyramid(featmap_shapes)
        # anchors = self.priorsbox.forward(featmap_shapes)

        # anchors double as rois (batch index 0 prepended)
        rois_batch_inds = torch.zeros_like(loc_preds[:, :, -1:])
        rois_batch = torch.cat([rois_batch_inds, anchors.unsqueeze(0)], dim=-1)
        second_rpn_anchors = anchors.unsqueeze(0)

        # placeholder: no 3d branch in this model
        rcnn_3d = torch.zeros_like(loc_preds)

        prediction_dict = {
            'rcnn_bbox_preds': loc_preds,
            'rcnn_cls_scores': conf_preds,
            'anchors': anchors,
            'rcnn_cls_probs': probs,
            'rois_batch': rois_batch,
            'second_rpn_anchors': second_rpn_anchors,
            'rcnn_3d': rcnn_3d
        }
        return prediction_dict

    def loss(self, prediction_dict, feed_dict):
        """Compute cls and bbox losses after assignment and subsampling."""
        loss_dict = {}

        gt_boxes = feed_dict['gt_boxes']

        anchors = prediction_dict['anchors']

        #################################
        # target assigner
        ################################
        # no need gt labels here,it just a binary classifcation problem
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights = \
            self.target_assigner.assign(anchors, gt_boxes, gt_labels=None)

        ################################
        # subsample
        ################################

        pos_indicator = rpn_reg_weights > 0
        indicator = rpn_cls_weights > 0

        rpn_cls_probs = prediction_dict['rcnn_cls_probs'][:, :, 1]
        cls_criterion = rpn_cls_probs

        batch_sampled_mask = self.sampler.subsample_batch(
            self.batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        batch_sampled_mask = batch_sampled_mask.type_as(rpn_cls_weights)
        rpn_cls_weights = rpn_cls_weights * batch_sampled_mask
        rpn_reg_weights = rpn_reg_weights * batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # guard against empty samples (avoid division by zero below)
        # NOTE(review): tensor truth-test only valid for batch size 1 —
        # confirm multi-batch training is unsupported here.
        if num_cls_coeff == 0:
            num_cls_coeff = torch.ones([]).type_as(num_cls_coeff)
        if num_reg_coeff == 0:
            num_reg_coeff = torch.ones([]).type_as(num_reg_coeff)

        # cls loss
        rpn_cls_score = prediction_dict['rcnn_cls_scores']
        # BUGFIX: the conf layers emit n_classes scores per anchor (see
        # make_multibox), so reshape by n_classes instead of a hard-coded 2
        # (identical behavior when n_classes == 2).
        rpn_cls_loss = self.conf_loss(
            rpn_cls_score.view(-1, self.n_classes), rpn_cls_targets.view(-1))
        rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
        rpn_cls_loss *= rpn_cls_weights
        rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()

        # bbox loss
        # shape(N,num,4)
        rpn_bbox_preds = prediction_dict['rcnn_bbox_preds']
        rpn_reg_loss = self.loc_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(rpn_reg_loss.shape[0], -1).sum(
            dim=1) / num_reg_coeff.float()

        prediction_dict['rcnn_reg_weights'] = rpn_reg_weights[
            batch_sampled_mask > 0]

        loss_dict['rpn_cls_loss'] = rpn_cls_loss
        loss_dict['rpn_bbox_loss'] = rpn_reg_loss

        # recall: re-assign with the decoded boxes so the assigner's analyzer
        # records recall statistics (return value intentionally unused)
        final_boxes = self.bbox_coder.decode_batch(rpn_bbox_preds, anchors)
        self.target_assigner.assign(final_boxes, gt_boxes)
        return loss_dict
class RPNModel(Model):
    def init_param(self, model_config):
        """Read the RPN configuration and build its stateless collaborators."""
        cfg = model_config
        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']
        self.rpn_batch_size = cfg['rpn_batch_size']
        self.use_focal_loss = cfg['use_focal_loss']
        # optional switch; None when absent from the config
        self.use_iou = cfg.get('use_iou')

        # anchor subsampler used in loss()
        self.sampler = DetectionSampler(cfg['sampler_config'])

        # anchor generation and the head widths derived from it
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # target assignment and the bbox coder it carries
        self.target_assigner = TargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder

    def init_weights(self):
        """Initialize every RPN head with a zero-mean gaussian (std 0.01)."""
        self.truncated = False

        # same initializer, same order as before: conv, cls head, bbox head
        for layer in (self.rpn_conv_cls, self.rpn_cls_score,
                      self.rpn_bbox_pred):
            Filler.normal_init(layer, 0, 0.01, self.truncated)

    def init_modules(self):
        """Build the RPN submodules: shared conv, cls head, bbox head, losses."""
        # 3x3 conv processing the input feature map; the bbox head reuses
        # this feature as well (see forward)
        self.rpn_conv_cls = nn.Conv2d(
            self.in_channels, 512, 3, 1, 1, bias=True)

        # bg/fg classification score layer (2 scores per anchor)
        self.rpn_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # anchor box offset prediction layer
        if self.use_score:
            # the bbox head also consumes the 2 cls scores as extra channels
            bbox_feat_channels = 512 + 2
            # BUG FIX: `/=` is true division in Python 3 and turns
            # nc_bbox_out into a float, which is an invalid out_channels
            # for nn.Conv2d; use floor division instead.
            self.nc_bbox_out //= self.num_anchors
        else:
            bbox_feat_channels = 512
        self.rpn_bbox_pred = nn.Conv2d(bbox_feat_channels, self.nc_bbox_out, 1,
                                       1, 0)

        # per-element bbox loss; reduction is done manually in loss()
        self.rpn_bbox_loss = nn.modules.loss.SmoothL1Loss(reduce=False)

        # classification loss: focal loss or plain per-element cross-entropy
        if self.use_focal_loss:
            self.rpn_cls_loss = FocalLoss(
                2, alpha=0.2, gamma=2, auto_alpha=False)
        else:
            self.rpn_cls_loss = functools.partial(
                F.cross_entropy, reduce=False)

    def generate_new_anchors(self, anchors):
        """Shrink every anchor to a fixed 2x2 box around its own center.

        Args:
            anchors: FloatTensor of shape (M, 4) in (x1, y1, x2, y2) order
                — assumed 2-D from the indexing; TODO confirm with caller.
        Returns:
            FloatTensor of shape (M, 4): same centers, fixed small size.
        """
        size = 2
        half = (size - 1) / 2
        cx = (anchors[:, 0] + anchors[:, 2]) / 2
        cy = (anchors[:, 1] + anchors[:, 3]) / 2
        # new anchors share centers with the old ones
        return torch.stack(
            [cx - half, cy - half, cx + half, cy + half], dim=-1)

    def generate_proposal(self, rpn_cls_probs, anchors, rpn_bbox_preds,
                          im_info):
        # TODO create a new Function
        """Decode, clip, score-sort and NMS per-anchor predictions into proposals.

        Args:
        rpn_cls_probs: FloatTensor,shape(N,2*num_anchors,H,W)
        rpn_bbox_preds: FloatTensor,shape(N,num_anchors*4,H,W)
        anchors: FloatTensor of anchors for the single feature map
        im_info: image size info used to clip boxes to the image bounds

        Returns:
        proposals_batch: FloatTensor, shape(N,post_nms_topN,4),
            zero-padded when fewer proposals survive NMS
        proposals_order: LongTensor, shape(N,post_nms_topN), indices of the
            kept anchors (-1 padding); used to track proposal provenance
        """
        # no gradients flow through proposal generation
        anchors = anchors
        rpn_cls_probs = rpn_cls_probs.detach()
        rpn_bbox_preds = rpn_bbox_preds.detach()

        batch_size = rpn_bbox_preds.shape[0]
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(batch_size, -1, 4)

        # decode against the shrunk, same-center anchors
        # (see generate_new_anchors)
        new_anchors = self.generate_new_anchors(anchors)
        proposals = self.bbox_coder.decode_batch(rpn_bbox_preds, new_anchors)

        # clip decoded boxes to the image boundary
        proposals = box_ops.clip_boxes(proposals, im_info)

        # fg prob: the second half of the channel dim holds the fg scores
        fg_probs = rpn_cls_probs[:, self.num_anchors:, :, :]
        fg_probs = fg_probs.permute(0, 2, 3, 1).contiguous().view(batch_size,
                                                                  -1)

        # sort all anchors by descending fg probability
        _, fg_probs_order = torch.sort(fg_probs, dim=1, descending=True)

        proposals_batch = torch.zeros(batch_size, self.post_nms_topN,
                                      4).type_as(rpn_bbox_preds)
        # -1 marks padding entries with no corresponding anchor
        proposals_order = torch.zeros(
            batch_size, self.post_nms_topN).fill_(-1).type_as(fg_probs_order)

        for i in range(batch_size):
            proposals_single = proposals[i]
            fg_probs_single = fg_probs[i]
            fg_order_single = fg_probs_order[i]
            # pre nms: keep only the top-scoring candidates
            if self.pre_nms_topN > 0:
                fg_order_single = fg_order_single[:self.pre_nms_topN]
            proposals_single = proposals_single[fg_order_single]
            fg_probs_single = fg_probs_single[fg_order_single]

            # nms expects (x1,y1,x2,y2,score) rows
            keep_idx_i = nms(
                torch.cat((proposals_single, fg_probs_single.unsqueeze(1)), 1),
                self.nms_thresh)
            keep_idx_i = keep_idx_i.long().view(-1)

            # post nms: cap the number of surviving proposals
            if self.post_nms_topN > 0:
                keep_idx_i = keep_idx_i[:self.post_nms_topN]
            proposals_single = proposals_single[keep_idx_i, :]
            fg_probs_single = fg_probs_single[keep_idx_i]
            fg_order_single = fg_order_single[keep_idx_i]

            # padding 0 at the end.
            num_proposal = keep_idx_i.numel()
            proposals_batch[i, :num_proposal, :] = proposals_single
            proposals_order[i, :num_proposal] = fg_order_single
        return proposals_batch, proposals_order

    def forward(self, bottom_blobs):
        """Run the RPN over the backbone feature map.

        Args:
            bottom_blobs: dict with 'base_feat' (N,C,H,W backbone features),
                'gt_boxes' and 'im_info'.
        Returns:
            dict with proposals, rois, anchors and the raw head outputs;
            scores/probs are reshaped to (N, H*W*num_anchors, 2) for loss().
        """
        base_feat = bottom_blobs['base_feat']
        batch_size = base_feat.shape[0]
        gt_boxes = bottom_blobs['gt_boxes']
        im_info = bottom_blobs['im_info']

        # shared rpn conv: both heads read from this feature
        rpn_conv_cls = F.relu(self.rpn_conv_cls(base_feat), inplace=True)

        # rpn cls score
        # shape(N,2*num_anchors,H,W)
        rpn_cls_scores = self.rpn_cls_score(rpn_conv_cls)

        # softmax over the 2-way (bg/fg) dim; final shape is unchanged
        rpn_cls_score_reshape = rpn_cls_scores.view(batch_size, 2, -1)
        rpn_cls_probs = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_probs = rpn_cls_probs.view_as(rpn_cls_scores)

        # rpn bbox pred, shape(N,4*num_anchors,H,W); the bbox head shares
        # the conv feature with the cls head
        rpn_bbox_preds = self.rpn_bbox_pred(rpn_conv_cls)

        # generate anchors for this single feature map
        feature_map_list = [base_feat.size()[-2:]]
        anchors = self.anchor_generator.generate(
            feature_map_list, input_size=im_info[0][:-1])

        ###############################
        # Proposal
        ###############################
        # note that proposals_order is used to track the transform of proposals
        proposals_batch, proposals_order = self.generate_proposal(
            rpn_cls_probs, anchors, rpn_bbox_preds, im_info)
        # prepend the per-image batch index to form (N, topN, 5) rois
        batch_idx = torch.arange(batch_size).view(batch_size, 1).expand(
            -1, proposals_batch.shape[1]).type_as(proposals_batch)
        rois_batch = torch.cat((batch_idx.unsqueeze(-1), proposals_batch),
                               dim=2)

        # reshape scores to (N, H*W*num_anchors, 2) for the loss
        rpn_cls_scores = rpn_cls_scores.view(batch_size, 2, -1,
                                             rpn_cls_scores.shape[2],
                                             rpn_cls_scores.shape[3])
        rpn_cls_scores = rpn_cls_scores.permute(
            0, 3, 4, 2, 1).contiguous().view(batch_size, -1, 2)

        # same reshape for the probabilities
        rpn_cls_probs = rpn_cls_probs.view(
            batch_size, 2, -1, rpn_cls_probs.shape[2], rpn_cls_probs.shape[3])
        rpn_cls_probs = rpn_cls_probs.permute(0, 3, 4, 2, 1).contiguous().view(
            batch_size, -1, 2)
        predict_dict = {
            'proposals_batch': proposals_batch,
            'rpn_cls_scores': rpn_cls_scores,
            'rois_batch': rois_batch,
            'anchors': anchors,

            # used for loss
            'rpn_bbox_preds': rpn_bbox_preds,
            'rpn_cls_probs': rpn_cls_probs,
            'proposals_order': proposals_order,
        }

        return predict_dict

    def loss(self, prediction_dict, feed_dict):
        """Compute RPN classification and regression losses.

        Args:
            prediction_dict: forward() output; reads 'anchors',
                'rpn_cls_probs', 'rpn_cls_scores', 'rpn_bbox_preds'.
            feed_dict: ground truth; reads 'gt_boxes'.
        Returns:
            dict with 'rpn_cls_loss' and 'rpn_bbox_loss', one value per image.
        """
        loss_dict = {}

        gt_boxes = feed_dict['gt_boxes']

        anchors = prediction_dict['anchors']

        # shrunk same-center anchors (see generate_new_anchors), passed to
        # the assigner alongside the originals
        new_anchors = self.generate_new_anchors(anchors)

        # binary fg/bg problem: no gt labels needed
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights, stats = \
            self.target_assigner.assign(anchors, gt_boxes, new_anchors, gt_labels=None)

        # subsample a fixed-size batch of anchors; fg probability is the
        # sampling criterion
        pos_indicator = rpn_reg_weights > 0
        indicator = rpn_cls_weights > 0

        rpn_cls_probs = prediction_dict['rpn_cls_probs'][:, :, 1]
        cls_criterion = rpn_cls_probs

        batch_sampled_mask = self.sampler.subsample_batch(
            self.rpn_batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        batch_sampled_mask = batch_sampled_mask.type_as(rpn_cls_weights)
        rpn_cls_weights = rpn_cls_weights * batch_sampled_mask
        rpn_reg_weights = rpn_reg_weights * batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # BUG FIX: the previous `if num_cls_coeff == 0:` check raises
        # "truth value of a multi-element tensor is ambiguous" for batch
        # size > 1; clamp the per-image counts instead, which also avoids
        # division by zero and is identical for batch size 1.
        num_cls_coeff = num_cls_coeff.clamp(min=1)
        num_reg_coeff = num_reg_coeff.clamp(min=1)

        # cls loss: per-anchor CE, masked by the sampled weights, then
        # averaged over the sampled anchors of each image
        rpn_cls_score = prediction_dict['rpn_cls_scores']
        rpn_cls_loss = self.rpn_cls_loss(
            rpn_cls_score.view(-1, 2), rpn_cls_targets.view(-1))
        rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
        rpn_cls_loss *= rpn_cls_weights
        rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()

        # bbox loss: (N, 4*num_anchors, H, W) -> (N, H*W*num_anchors, 4)
        rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(rpn_reg_loss.shape[0], -1).sum(
            dim=1) / num_reg_coeff.float()

        loss_dict['rpn_cls_loss'] = rpn_cls_loss
        loss_dict['rpn_bbox_loss'] = rpn_reg_loss
        return loss_dict
# Example #16
# 0
class DistanceRPNModel(Model):
    def init_param(self, model_config):
        """Read configuration and build the distance-RPN collaborators."""
        cfg = model_config
        self.in_channels = cfg['din']
        self.pre_nms_topN = cfg['pre_nms_topN']
        self.post_nms_topN = cfg['post_nms_topN']
        self.nms_thresh = cfg['nms_thresh']
        self.use_score = cfg['use_score']
        self.rpn_batch_size = cfg['rpn_batch_size']
        self.use_focal_loss = cfg['use_focal_loss']

        # anchor subsampler (hard-example mining cannot be used here)
        self.sampler = DetectionSampler(cfg['sampler_config'])

        # anchor generation and the head widths derived from it
        self.anchor_generator = AnchorGenerator(
            cfg['anchor_generator_config'])
        self.num_anchors = self.anchor_generator.num_anchors
        self.nc_bbox_out = 4 * self.num_anchors
        self.nc_score_out = self.num_anchors * 2

        # distance-based target assignment and the bbox coder it carries
        self.target_assigner = DistanceTargetAssigner(
            cfg['target_assigner_config'])
        self.bbox_coder = self.target_assigner.bbox_coder

    def init_weights(self):
        """Initialize every RPN head with a zero-mean gaussian (std 0.01)."""
        self.truncated = False

        # same initializer, same order as before: conv, cls head, bbox head
        for layer in (self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_pred):
            Filler.normal_init(layer, 0, 0.01, self.truncated)

    def init_modules(self):
        """Build the submodules: shared conv, cls head, bbox head, losses."""
        # 3x3 conv processing the input feature map
        self.rpn_conv = nn.Conv2d(self.in_channels, 512, 3, 1, 1, bias=True)

        # bg/fg classification score layer (2 scores per anchor)
        self.rpn_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)

        # anchor box offset prediction layer
        if self.use_score:
            # the bbox head also consumes the 2 cls scores as extra channels
            bbox_feat_channels = 512 + 2
            # BUG FIX: `/=` is true division in Python 3 and turns
            # nc_bbox_out into a float, which is an invalid out_channels
            # for nn.Conv2d; use floor division instead.
            self.nc_bbox_out //= self.num_anchors
        else:
            bbox_feat_channels = 512
        self.rpn_bbox_pred = nn.Conv2d(bbox_feat_channels, self.nc_bbox_out, 1,
                                       1, 0)

        # per-element bbox loss; reduction is done manually in loss()
        self.rpn_bbox_loss = nn.modules.loss.SmoothL1Loss(reduce=False)

        # classification loss: focal loss or plain per-element cross-entropy
        if self.use_focal_loss:
            self.rpn_cls_loss = FocalLoss(2)
        else:
            self.rpn_cls_loss = functools.partial(
                F.cross_entropy, reduce=False)

    def generate_proposal(self, rpn_cls_probs, anchors, rpn_bbox_preds,
                          im_info):
        # TODO create a new Function
        """Decode predictions into proposals, scored by inverse center distance
        and gated by the softmax fg probability.

        Args:
            rpn_cls_probs: FloatTensor,shape(N,2*num_anchors,H,W)
            rpn_bbox_preds: FloatTensor,shape(N,num_anchors*4,H,W)
            anchors: list holding one anchors tensor (single feature map)
            im_info: image size info used to clip boxes

        Returns:
            proposals_batch: FloatTensor, shape(N,post_nms_topN,4)
            proposals_order: LongTensor, shape(N,post_nms_topN), kept anchor
                indices (-1 padding)
            fg_probs: FloatTensor, reciprocal-distance scores per anchor
            distance: FloatTensor, predicted center distances per anchor
        """
        # only the first (and only) feature map's anchors are used
        anchors = anchors[0]
        # do not backward through proposal generation
        anchors = anchors
        rpn_cls_probs = rpn_cls_probs.detach()
        rpn_bbox_preds = rpn_bbox_preds.detach()

        batch_size = rpn_bbox_preds.shape[0]
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        # shape(N,H*W*num_anchors,4)
        rpn_bbox_preds = rpn_bbox_preds.view(batch_size, -1, 4)

        # gate: softmax fg probability from the cls head (second half of the
        # channel dim); anchors the head calls background are suppressed
        gate = rpn_cls_probs[:, self.num_anchors:, :, :]
        gate = gate.permute(0, 2, 3, 1).contiguous().view(batch_size, -1)
        # distance-derived scores (see get_rpn_cls_probs)
        fg_probs, distance = self.get_rpn_cls_probs(
            rpn_bbox_preds, anchors=None)
        # zero the score (and push the distance to a huge value) wherever
        # the softmax gate says background
        fg_probs[gate < 0.5] = 0
        distance[gate < 0.5] = 1e5

        proposals = self.bbox_coder.decode_batch(rpn_bbox_preds, anchors)

        # clip decoded boxes to the image boundary
        proposals = box_ops.clip_boxes(proposals, im_info)

        # sort all anchors by descending (reciprocal-distance) score
        _, fg_probs_order = torch.sort(fg_probs, dim=1, descending=True)

        proposals_batch = torch.zeros(batch_size, self.post_nms_topN,
                                      4).type_as(rpn_bbox_preds)
        # -1 marks padding entries with no corresponding anchor
        proposals_order = torch.zeros(
            batch_size, self.post_nms_topN).fill_(-1).type_as(fg_probs_order)

        for i in range(batch_size):
            proposals_single = proposals[i]
            fg_probs_single = fg_probs[i]
            fg_order_single = fg_probs_order[i]
            # pre nms: keep only the top-scoring candidates
            if self.pre_nms_topN > 0:
                fg_order_single = fg_order_single[:self.pre_nms_topN]
            proposals_single = proposals_single[fg_order_single]
            fg_probs_single = fg_probs_single[fg_order_single]

            # nms expects (x1,y1,x2,y2,score) rows
            keep_idx_i = nms(
                torch.cat((proposals_single, fg_probs_single.unsqueeze(1)), 1),
                self.nms_thresh)
            keep_idx_i = keep_idx_i.long().view(-1)

            # post nms: cap the number of surviving proposals
            if self.post_nms_topN > 0:
                keep_idx_i = keep_idx_i[:self.post_nms_topN]
            proposals_single = proposals_single[keep_idx_i, :]
            fg_probs_single = fg_probs_single[keep_idx_i]
            fg_order_single = fg_order_single[keep_idx_i]

            # padding 0 at the end.
            num_proposal = keep_idx_i.numel()
            proposals_batch[i, :num_proposal, :] = proposals_single
            proposals_order[i, :num_proposal] = fg_order_single

        return proposals_batch, proposals_order, fg_probs, distance

    def get_rpn_cls_probs(self, bbox_pred, anchors=None):
        """Turn predicted center offsets into a pseudo fg score.

        Note that all inputs carry no gradients.

        Args:
            bbox_pred: shape (N, M, 4); only the first two channels
                (the dx/dy offsets) are used.
            anchors: unused; kept for signature compatibility.
        Returns:
            tuple (score, distance), both shape (N, M): score is the
            reciprocal of the center distance (closer => larger).
        """
        dx = bbox_pred[:, :, 0]
        dy = bbox_pred[:, :, 1]
        # Euclidean length of the predicted center offset
        distance = (dx.pow(2) + dy.pow(2)).sqrt()
        # small epsilon keeps the reciprocal finite at zero distance
        eps = 1e-5
        return 1.0 / (distance + eps), distance

    def forward(self, bottom_blobs):
        """Run the distance-based RPN over the backbone feature map.

        Args:
            bottom_blobs: dict with 'base_feat' (N,C,H,W backbone features),
                'gt_boxes' and 'im_info'.
        Returns:
            dict with proposals, rois (gt appended while training), anchors,
            raw head outputs and the distance-derived scores.
        """
        base_feat = bottom_blobs['base_feat']
        batch_size = base_feat.shape[0]
        gt_boxes = bottom_blobs['gt_boxes']
        im_info = bottom_blobs['im_info']

        # rpn conv
        rpn_conv = F.relu(self.rpn_conv(base_feat), inplace=True)

        # rpn bbox pred
        # shape(N,4*num_anchors,H,W)
        rpn_bbox_preds = self.rpn_bbox_pred(rpn_conv)

        # generate anchors for this single feature map
        feature_map_list = [base_feat.size()[-2:]]
        anchors = self.anchor_generator.generate(feature_map_list)

        rpn_cls_scores = self.rpn_cls_score(rpn_conv)

        # softmax over the 2-way (bg/fg) dim; final shape is unchanged
        rpn_cls_score_reshape = rpn_cls_scores.view(batch_size, 2, -1)
        rpn_cls_probs = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_probs = rpn_cls_probs.view_as(rpn_cls_scores)

        ###############################
        # Proposal
        ###############################
        # note that proposals_order is used to track the transform of proposals
        proposals_batch, proposals_order, fg_probs, distance = self.generate_proposal(
            rpn_cls_probs, anchors, rpn_bbox_preds, im_info)
        # prepend the per-image batch index to form (N, topN, 5) rois
        batch_idx = torch.arange(batch_size).view(batch_size, 1).expand(
            -1, proposals_batch.shape[1]).type_as(proposals_batch)
        rois_batch = torch.cat((batch_idx.unsqueeze(-1), proposals_batch),
                               dim=2)

        # during training, append gt boxes so positives can always be sampled
        if self.training:
            rois_batch = self.append_gt(rois_batch, gt_boxes)

        # reshape scores to (N, H*W*num_anchors, 2) for the loss
        rpn_cls_scores = rpn_cls_scores.view(batch_size, 2, -1,
                                             rpn_cls_scores.shape[2],
                                             rpn_cls_scores.shape[3])
        rpn_cls_scores = rpn_cls_scores.permute(
            0, 3, 4, 2, 1).contiguous().view(batch_size, -1, 2)

        predict_dict = {
            'proposals_batch': proposals_batch,
            'rois_batch': rois_batch,
            'anchors': anchors,

            # used for loss
            'rpn_bbox_preds': rpn_bbox_preds,
            'rpn_cls_scores': rpn_cls_scores,
            'proposals_order': proposals_order,
            'fg_probs': fg_probs,
            'distance': distance
        }

        return predict_dict

    def append_gt(self, rois_batch, gt_boxes):
        """Append ground-truth boxes (as 5-column rois) to rois_batch.

        Args:
            rois_batch: shape (N, R, 5) rois; column 0 is the batch index.
            gt_boxes: shape (N, G, >=4) ground-truth boxes.
        Returns:
            shape (N, R + G, 5) tensor with gt boxes appended per image.

        NOTE(review): the batch-index column of the appended rows is left
        at 0 for every image — looks wrong for batch size > 1; confirm.
        """
        num_img, num_gt = gt_boxes.shape[0], gt_boxes.shape[1]
        padded = torch.zeros(num_img, num_gt, 5).type_as(gt_boxes)
        padded[:, :, 1:5] = gt_boxes[:, :, :4]
        return torch.cat([rois_batch, padded], dim=1)

    def loss(self, prediction_dict, feed_dict):
        """Compute classification and regression losses for the distance RPN.

        Args:
            prediction_dict: forward() output; reads 'anchors', 'fg_probs',
                'rpn_cls_scores', 'rpn_bbox_preds'.
            feed_dict: ground truth; reads 'gt_boxes'.
        Returns:
            dict with 'rpn_cls_loss' and 'rpn_bbox_loss', one value per image.
        """
        loss_dict = {}

        gt_boxes = feed_dict['gt_boxes']

        anchors = prediction_dict['anchors']

        assert len(anchors) == 1, 'just one feature maps is supported now'
        anchors = anchors[0]

        # binary fg/bg problem: no gt labels needed
        rpn_cls_targets, rpn_reg_targets, \
            rpn_cls_weights, rpn_reg_weights = \
            self.target_assigner.assign(anchors, gt_boxes, gt_labels=None)

        # subsample: the distance-derived fg score ranks the candidates
        rpn_cls_probs = prediction_dict['fg_probs']
        pos_indicator = rpn_cls_targets > 0
        indicator = rpn_cls_weights > 0
        cls_criterion = rpn_cls_probs

        batch_sampled_mask = self.sampler.subsample_batch(
            self.rpn_batch_size,
            pos_indicator,
            criterion=cls_criterion,
            indicator=indicator)
        batch_sampled_mask = batch_sampled_mask.type_as(rpn_cls_weights)
        rpn_cls_weights = rpn_cls_weights * batch_sampled_mask
        rpn_reg_weights = rpn_reg_weights * batch_sampled_mask
        num_cls_coeff = (rpn_cls_weights > 0).sum(dim=1)
        num_reg_coeff = (rpn_reg_weights > 0).sum(dim=1)
        # BUG FIX: the previous `if num_cls_coeff == 0:` check raises
        # "truth value of a multi-element tensor is ambiguous" for batch
        # size > 1; clamp the per-image counts instead, which also avoids
        # division by zero and is identical for batch size 1.
        num_cls_coeff = num_cls_coeff.clamp(min=1)
        num_reg_coeff = num_reg_coeff.clamp(min=1)

        # cls loss: masked by the sampled weights, averaged per image
        # NOTE(review): rpn_cls_scores is (N, M, 2) here while
        # F.cross_entropy expects the class dim second — presumably the
        # configured rpn_cls_loss handles this layout; confirm.
        rpn_cls_scores = prediction_dict['rpn_cls_scores']
        rpn_cls_loss = self.rpn_cls_loss(rpn_cls_scores, rpn_cls_targets)
        rpn_cls_loss = rpn_cls_loss.view_as(rpn_cls_weights)
        rpn_cls_loss *= rpn_cls_weights
        rpn_cls_loss = rpn_cls_loss.sum(dim=1) / num_cls_coeff.float()

        # bbox loss: (N, 4*num_anchors, H, W) -> (N, H*W*num_anchors, 4)
        rpn_bbox_preds = prediction_dict['rpn_bbox_preds']
        rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous()
        rpn_bbox_preds = rpn_bbox_preds.view(rpn_bbox_preds.shape[0], -1, 4)
        rpn_reg_loss = self.rpn_bbox_loss(rpn_bbox_preds, rpn_reg_targets)
        rpn_reg_loss *= rpn_reg_weights.unsqueeze(-1).expand(-1, -1, 4)
        rpn_reg_loss = rpn_reg_loss.view(rpn_reg_loss.shape[0], -1).sum(
            dim=1) / num_reg_coeff.float()

        loss_dict['rpn_cls_loss'] = rpn_cls_loss
        loss_dict['rpn_bbox_loss'] = rpn_reg_loss
        return loss_dict